xref: /titanic_44/usr/src/uts/common/io/hxge/hxge_main.c (revision 2d6b5ea734bb47d251c82670646fde46af15fd69)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * SunOS MT STREAMS Hydra 10Gb Ethernet Device Driver.
28  */
29 #include <hxge_impl.h>
30 #include <hxge_pfc.h>
31 
32 /*
33  * PSARC/2007/453 MSI-X interrupt limit override
34  * (This PSARC case is limited to MSI-X vectors
35  *  and SPARC platforms only).
36  */
37 #if defined(_BIG_ENDIAN)
38 uint32_t hxge_msi_enable = 2;
39 #else
40 uint32_t hxge_msi_enable = 1;
41 #endif
42 
43 /*
44  * Globals: tunable parameters (/etc/system or adb)
45  *
46  */
47 uint32_t hxge_rbr_size = HXGE_RBR_RBB_DEFAULT;
48 uint32_t hxge_rbr_spare_size = 0;
49 uint32_t hxge_rcr_size = HXGE_RCR_DEFAULT;
50 uint32_t hxge_tx_ring_size = HXGE_TX_RING_DEFAULT;
51 uint32_t hxge_bcopy_thresh = TX_BCOPY_MAX;
52 uint32_t hxge_dvma_thresh = TX_FASTDVMA_MIN;
53 uint32_t hxge_dma_stream_thresh = TX_STREAM_MIN;
54 uint32_t hxge_jumbo_frame_size = MAX_FRAME_SIZE;
55 
56 static hxge_os_mutex_t hxgedebuglock;
57 static int hxge_debug_init = 0;
58 
59 /*
60  * Debugging flags:
61  *		hxge_no_tx_lb: nonzero disables transmit load balancing
62  *		hxge_tx_lb_policy: 0 - TCP/UDP port (default)
63  *				   1 - From the Stack
64  *				   2 - Destination IP Address
65  */
66 uint32_t hxge_no_tx_lb = 0;
67 uint32_t hxge_tx_lb_policy = HXGE_TX_LB_TCPUDP;
68 
69 /*
70  * Tunable to bound the number of Rx packets processed per
71  * interrupt, reducing the time spent in the ISR.
72  */
73 uint32_t hxge_max_rx_pkts = 256;
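/*
 * For example, to halve the per-interrupt Rx packet budget, add the
 * following (assuming the module is delivered as "hxge") to /etc/system:
 *
 *	set hxge:hxge_max_rx_pkts = 128
 */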
74 
75 /*
76  * Tunables to manage the receive buffer blocks.
77  *
78  * hxge_rx_threshold_hi: copy all buffers.
79  * hxge_rx_buf_size_type: receive buffer block size type.
80  * hxge_rx_threshold_lo: copy only up to tunable block size type.
81  */
82 hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_NONE;
83 hxge_rxbuf_type_t hxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
84 hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_NONE;
85 
86 rtrace_t hpi_rtracebuf;
87 
88 /*
89  * Function Prototypes
90  */
91 static int hxge_attach(dev_info_t *, ddi_attach_cmd_t);
92 static int hxge_detach(dev_info_t *, ddi_detach_cmd_t);
93 static void hxge_unattach(p_hxge_t);
94 
95 static hxge_status_t hxge_setup_system_dma_pages(p_hxge_t);
96 
97 static hxge_status_t hxge_setup_mutexes(p_hxge_t);
98 static void hxge_destroy_mutexes(p_hxge_t);
99 
100 static hxge_status_t hxge_map_regs(p_hxge_t hxgep);
101 static void hxge_unmap_regs(p_hxge_t hxgep);
102 
103 hxge_status_t hxge_add_intrs(p_hxge_t hxgep);
104 static hxge_status_t hxge_add_soft_intrs(p_hxge_t hxgep);
105 static void hxge_remove_intrs(p_hxge_t hxgep);
106 static void hxge_remove_soft_intrs(p_hxge_t hxgep);
107 static hxge_status_t hxge_add_intrs_adv(p_hxge_t hxgep);
108 static hxge_status_t hxge_add_intrs_adv_type(p_hxge_t, uint32_t);
109 static hxge_status_t hxge_add_intrs_adv_type_fix(p_hxge_t, uint32_t);
110 void hxge_intrs_enable(p_hxge_t hxgep);
111 static void hxge_intrs_disable(p_hxge_t hxgep);
112 static void hxge_suspend(p_hxge_t);
113 static hxge_status_t hxge_resume(p_hxge_t);
114 hxge_status_t hxge_setup_dev(p_hxge_t);
115 static void hxge_destroy_dev(p_hxge_t);
116 hxge_status_t hxge_alloc_mem_pool(p_hxge_t);
117 static void hxge_free_mem_pool(p_hxge_t);
118 static hxge_status_t hxge_alloc_rx_mem_pool(p_hxge_t);
119 static void hxge_free_rx_mem_pool(p_hxge_t);
120 static hxge_status_t hxge_alloc_tx_mem_pool(p_hxge_t);
121 static void hxge_free_tx_mem_pool(p_hxge_t);
122 static hxge_status_t hxge_dma_mem_alloc(p_hxge_t, dma_method_t,
123     struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
124     p_hxge_dma_common_t);
125 static void hxge_dma_mem_free(p_hxge_dma_common_t);
126 static hxge_status_t hxge_alloc_rx_buf_dma(p_hxge_t, uint16_t,
127     p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
128 static void hxge_free_rx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
129 static hxge_status_t hxge_alloc_rx_cntl_dma(p_hxge_t, uint16_t,
130     p_hxge_dma_common_t *, struct ddi_dma_attr *, size_t);
131 static void hxge_free_rx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
132 static hxge_status_t hxge_alloc_tx_buf_dma(p_hxge_t, uint16_t,
133     p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
134 static void hxge_free_tx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
135 static hxge_status_t hxge_alloc_tx_cntl_dma(p_hxge_t, uint16_t,
136     p_hxge_dma_common_t *, size_t);
137 static void hxge_free_tx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
138 static int hxge_init_common_dev(p_hxge_t);
139 static void hxge_uninit_common_dev(p_hxge_t);
140 
141 /*
142  * The next declarations are for the GLDv3 interface.
143  */
144 static int hxge_m_start(void *);
145 static void hxge_m_stop(void *);
146 static int hxge_m_unicst(void *, const uint8_t *);
147 static int hxge_m_multicst(void *, boolean_t, const uint8_t *);
148 static int hxge_m_promisc(void *, boolean_t);
149 static void hxge_m_ioctl(void *, queue_t *, mblk_t *);
150 static hxge_status_t hxge_mac_register(p_hxge_t hxgep);
151 
152 static boolean_t hxge_m_getcapab(void *, mac_capab_t, void *);
153 static boolean_t hxge_param_locked(mac_prop_id_t pr_num);
154 static int hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
155     uint_t pr_valsize, const void *pr_val);
156 static int hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
157     uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *);
158 static int hxge_get_def_val(hxge_t *hxgep, mac_prop_id_t pr_num,
159     uint_t pr_valsize, void *pr_val);
160 static int hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name,
161     uint_t pr_valsize, const void *pr_val);
162 static int hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name,
163     uint_t pr_flags, uint_t pr_valsize, void *pr_val);
164 static void hxge_link_poll(void *arg);
165 static void hxge_link_update(p_hxge_t hxge, link_state_t state);
166 static void hxge_msix_init(p_hxge_t hxgep);
167 static void hxge_store_msix_table(p_hxge_t hxgep);
168 static void hxge_check_1entry_msix_table(p_hxge_t hxgep, int msix_index);
169 
170 mac_priv_prop_t hxge_priv_props[] = {
171 	{"_rxdma_intr_time", MAC_PROP_PERM_RW},
172 	{"_rxdma_intr_pkts", MAC_PROP_PERM_RW},
173 	{"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW},
174 	{"_class_opt_ipv4_udp", MAC_PROP_PERM_RW},
175 	{"_class_opt_ipv4_ah", MAC_PROP_PERM_RW},
176 	{"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW},
177 	{"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW},
178 	{"_class_opt_ipv6_udp", MAC_PROP_PERM_RW},
179 	{"_class_opt_ipv6_ah", MAC_PROP_PERM_RW},
180 	{"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW}
181 };
182 
183 #define	HXGE_MAX_PRIV_PROPS	\
184 	(sizeof (hxge_priv_props)/sizeof (mac_priv_prop_t))
185 
186 #define	HXGE_MAGIC	0x4E584745UL
187 #define	MAX_DUMP_SZ 256
188 
189 #define	HXGE_M_CALLBACK_FLAGS	\
190 	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)
191 
192 extern mblk_t *hxge_m_tx(void *arg, mblk_t *mp);
193 extern hxge_status_t hxge_pfc_set_default_mac_addr(p_hxge_t hxgep);
194 
195 static mac_callbacks_t hxge_m_callbacks = {
196 	HXGE_M_CALLBACK_FLAGS,
197 	hxge_m_stat,
198 	hxge_m_start,
199 	hxge_m_stop,
200 	hxge_m_promisc,
201 	hxge_m_multicst,
202 	hxge_m_unicst,
203 	hxge_m_tx,
204 	hxge_m_ioctl,
205 	hxge_m_getcapab,
206 	NULL,
207 	NULL,
208 	hxge_m_setprop,
209 	hxge_m_getprop
210 };
211 
212 /* PSARC/2007/453 MSI-X interrupt limit override. */
213 #define	HXGE_MSIX_REQUEST_10G	8
214 static int hxge_create_msi_property(p_hxge_t);
215 
216 /* Enable debug messages as necessary. */
217 uint64_t hxge_debug_level = 0;
218 
219 /*
220  * This list contains the instance structures for the Hydra
221  * devices present in the system. The lock exists to guarantee
222  * mutually exclusive access to the list.
223  */
224 void *hxge_list = NULL;
225 void *hxge_hw_list = NULL;
226 hxge_os_mutex_t hxge_common_lock;
227 
228 extern uint64_t hpi_debug_level;
229 
230 extern hxge_status_t hxge_ldgv_init();
231 extern hxge_status_t hxge_ldgv_uninit();
232 extern hxge_status_t hxge_intr_ldgv_init();
233 extern void hxge_fm_init(p_hxge_t hxgep, ddi_device_acc_attr_t *reg_attr,
234     ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr);
235 extern void hxge_fm_fini(p_hxge_t hxgep);
236 
237 /*
238  * Count used to maintain the number of buffers being used
239  * by Hydra instances and loaned up to the upper layers.
240  */
241 uint32_t hxge_mblks_pending = 0;
242 
243 /*
244  * Device register access attributes for PIO.
245  */
246 static ddi_device_acc_attr_t hxge_dev_reg_acc_attr = {
247 	DDI_DEVICE_ATTR_V0,
248 	DDI_STRUCTURE_LE_ACC,
249 	DDI_STRICTORDER_ACC,
250 };
251 
252 /*
253  * Device descriptor access attributes for DMA.
254  */
255 static ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr = {
256 	DDI_DEVICE_ATTR_V0,
257 	DDI_STRUCTURE_LE_ACC,
258 	DDI_STRICTORDER_ACC
259 };
260 
261 /*
262  * Device buffer access attributes for DMA.
263  */
264 static ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr = {
265 	DDI_DEVICE_ATTR_V0,
266 	DDI_STRUCTURE_BE_ACC,
267 	DDI_STRICTORDER_ACC
268 };
269 
270 ddi_dma_attr_t hxge_rx_rcr_desc_dma_attr = {
271 	DMA_ATTR_V0,		/* version number. */
272 	0,			/* low address */
273 	0xffffffffffffffff,	/* high address */
274 	0xffffffffffffffff,	/* address counter max */
275 	0x80000,		/* alignment */
276 	0xfc00fc,		/* dlim_burstsizes */
277 	0x1,			/* minimum transfer size */
278 	0xffffffffffffffff,	/* maximum transfer size */
279 	0xffffffffffffffff,	/* maximum segment size */
280 	1,			/* scatter/gather list length */
281 	(unsigned int)1,	/* granularity */
282 	0			/* attribute flags */
283 };
284 
285 ddi_dma_attr_t hxge_tx_desc_dma_attr = {
286 	DMA_ATTR_V0,		/* version number. */
287 	0,			/* low address */
288 	0xffffffffffffffff,	/* high address */
289 	0xffffffffffffffff,	/* address counter max */
290 	0x100000,		/* alignment */
291 	0xfc00fc,		/* dlim_burstsizes */
292 	0x1,			/* minimum transfer size */
293 	0xffffffffffffffff,	/* maximum transfer size */
294 	0xffffffffffffffff,	/* maximum segment size */
295 	1,			/* scatter/gather list length */
296 	(unsigned int)1,	/* granularity */
297 	0			/* attribute flags */
298 };
299 
300 ddi_dma_attr_t hxge_rx_rbr_desc_dma_attr = {
301 	DMA_ATTR_V0,		/* version number. */
302 	0,			/* low address */
303 	0xffffffffffffffff,	/* high address */
304 	0xffffffffffffffff,	/* address counter max */
305 	0x40000,		/* alignment */
306 	0xfc00fc,		/* dlim_burstsizes */
307 	0x1,			/* minimum transfer size */
308 	0xffffffffffffffff,	/* maximum transfer size */
309 	0xffffffffffffffff,	/* maximum segment size */
310 	1,			/* scatter/gather list length */
311 	(unsigned int)1,	/* granularity */
312 	0			/* attribute flags */
313 };
314 
315 ddi_dma_attr_t hxge_rx_mbox_dma_attr = {
316 	DMA_ATTR_V0,		/* version number. */
317 	0,			/* low address */
318 	0xffffffffffffffff,	/* high address */
319 	0xffffffffffffffff,	/* address counter max */
320 #if defined(_BIG_ENDIAN)
321 	0x2000,			/* alignment */
322 #else
323 	0x1000,			/* alignment */
324 #endif
325 	0xfc00fc,		/* dlim_burstsizes */
326 	0x1,			/* minimum transfer size */
327 	0xffffffffffffffff,	/* maximum transfer size */
328 	0xffffffffffffffff,	/* maximum segment size */
329 	5,			/* scatter/gather list length */
330 	(unsigned int)1,	/* granularity */
331 	0			/* attribute flags */
332 };
333 
334 ddi_dma_attr_t hxge_tx_dma_attr = {
335 	DMA_ATTR_V0,		/* version number. */
336 	0,			/* low address */
337 	0xffffffffffffffff,	/* high address */
338 	0xffffffffffffffff,	/* address counter max */
339 #if defined(_BIG_ENDIAN)
340 	0x2000,			/* alignment */
341 #else
342 	0x1000,			/* alignment */
343 #endif
344 	0xfc00fc,		/* dlim_burstsizes */
345 	0x1,			/* minimum transfer size */
346 	0xffffffffffffffff,	/* maximum transfer size */
347 	0xffffffffffffffff,	/* maximum segment size */
348 	5,			/* scatter/gather list length */
349 	(unsigned int)1,	/* granularity */
350 	0			/* attribute flags */
351 };
352 
353 ddi_dma_attr_t hxge_rx_dma_attr = {
354 	DMA_ATTR_V0,		/* version number. */
355 	0,			/* low address */
356 	0xffffffffffffffff,	/* high address */
357 	0xffffffffffffffff,	/* address counter max */
358 	0x10000,		/* alignment */
359 	0xfc00fc,		/* dlim_burstsizes */
360 	0x1,			/* minimum transfer size */
361 	0xffffffffffffffff,	/* maximum transfer size */
362 	0xffffffffffffffff,	/* maximum segment size */
363 	1,			/* scatter/gather list length */
364 	(unsigned int)1,	/* granularity */
365 	DDI_DMA_RELAXED_ORDERING /* attribute flags */
366 };
367 
368 ddi_dma_lim_t hxge_dma_limits = {
369 	(uint_t)0,		/* dlim_addr_lo */
370 	(uint_t)0xffffffff,	/* dlim_addr_hi */
371 	(uint_t)0xffffffff,	/* dlim_cntr_max */
372 	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
373 	0x1,			/* dlim_minxfer */
374 	1024			/* dlim_speed */
375 };
376 
377 dma_method_t hxge_force_dma = DVMA;
378 
379 /*
380  * DMA chunk sizes.
381  *
382  * Try to allocate the largest possible size so that fewer
383  * DMA chunks need to be managed.
384  */
385 size_t alloc_sizes[] = {
386     0x1000, 0x2000, 0x4000, 0x8000,
387     0x10000, 0x20000, 0x40000, 0x80000,
388     0x100000, 0x200000, 0x400000, 0x800000, 0x1000000
389 };
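/*
 * hxge_alloc_rx_buf_dma() below starts with the smallest entry in this
 * table that covers the request (capped at the largest entry) and steps
 * down to smaller chunk sizes whenever an allocation attempt fails.
 */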
390 
391 /*
392  * hxge_attach: attach (or resume) a device instance to the system.
393  */
394 static int
395 hxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
396 {
397 	p_hxge_t	hxgep = NULL;
398 	int		instance;
399 	int		status = DDI_SUCCESS;
400 
401 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_attach"));
402 
403 	/*
404 	 * Get the device instance since we'll need to set up or retrieve a soft
405 	 * state for this instance.
406 	 */
407 	instance = ddi_get_instance(dip);
408 
409 	switch (cmd) {
410 	case DDI_ATTACH:
411 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_ATTACH"));
412 		break;
413 
414 	case DDI_RESUME:
415 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_RESUME"));
416 		hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
417 		if (hxgep == NULL) {
418 			status = DDI_FAILURE;
419 			break;
420 		}
421 		if (hxgep->dip != dip) {
422 			status = DDI_FAILURE;
423 			break;
424 		}
425 		if (hxgep->suspended == DDI_PM_SUSPEND) {
426 			status = ddi_dev_is_needed(hxgep->dip, 0, 1);
427 		} else {
428 			(void) hxge_resume(hxgep);
429 		}
430 		goto hxge_attach_exit;
431 
432 	case DDI_PM_RESUME:
433 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_RESUME"));
434 		hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
435 		if (hxgep == NULL) {
436 			status = DDI_FAILURE;
437 			break;
438 		}
439 		if (hxgep->dip != dip) {
440 			status = DDI_FAILURE;
441 			break;
442 		}
443 		(void) hxge_resume(hxgep);
444 		goto hxge_attach_exit;
445 
446 	default:
447 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing unknown"));
448 		status = DDI_FAILURE;
449 		goto hxge_attach_exit;
450 	}
451 
452 	if (ddi_soft_state_zalloc(hxge_list, instance) == DDI_FAILURE) {
453 		status = DDI_FAILURE;
454 		HXGE_ERROR_MSG((hxgep, DDI_CTL,
455 		    "ddi_soft_state_zalloc failed"));
456 		goto hxge_attach_exit;
457 	}
458 
459 	hxgep = ddi_get_soft_state(hxge_list, instance);
460 	if (hxgep == NULL) {
461 		status = HXGE_ERROR;
462 		HXGE_ERROR_MSG((hxgep, DDI_CTL,
463 		    "ddi_get_soft_state failed"));
464 		goto hxge_attach_fail2;
465 	}
466 
467 	hxgep->drv_state = 0;
468 	hxgep->dip = dip;
469 	hxgep->instance = instance;
470 	hxgep->p_dip = ddi_get_parent(dip);
471 	hxgep->hxge_debug_level = hxge_debug_level;
472 	hpi_debug_level = hxge_debug_level;
473 
474 	hxge_fm_init(hxgep, &hxge_dev_reg_acc_attr, &hxge_dev_desc_dma_acc_attr,
475 	    &hxge_rx_dma_attr);
476 
477 	status = hxge_map_regs(hxgep);
478 	if (status != HXGE_OK) {
479 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_map_regs failed"));
480 		goto hxge_attach_fail3;
481 	}
482 
483 	/* Scrub the MSI-X memory */
484 	hxge_msix_init(hxgep);
485 
486 	status = hxge_init_common_dev(hxgep);
487 	if (status != HXGE_OK) {
488 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
489 		    "hxge_init_common_dev failed"));
490 		goto hxge_attach_fail4;
491 	}
492 
493 	/*
494 	 * Setup the Ndd parameters for this instance.
495 	 */
496 	hxge_init_param(hxgep);
497 
498 	/*
499 	 * Setup Register Tracing Buffer.
500 	 */
501 	hpi_rtrace_buf_init((rtrace_t *)&hpi_rtracebuf);
502 
503 	/* init stats ptr */
504 	hxge_init_statsp(hxgep);
505 
506 	status = hxge_setup_mutexes(hxgep);
507 	if (status != HXGE_OK) {
508 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set mutex failed"));
509 		goto hxge_attach_fail;
510 	}
511 
512 	status = hxge_get_config_properties(hxgep);
513 	if (status != HXGE_OK) {
514 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "get_hw create failed"));
515 		goto hxge_attach_fail;
516 	}
517 
518 	/*
519 	 * Setup the Kstats for the driver.
520 	 */
521 	hxge_setup_kstats(hxgep);
522 	hxge_setup_param(hxgep);
523 
524 	status = hxge_setup_system_dma_pages(hxgep);
525 	if (status != HXGE_OK) {
526 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "set dma page failed"));
527 		goto hxge_attach_fail;
528 	}
529 
530 	hxge_hw_id_init(hxgep);
531 	hxge_hw_init_niu_common(hxgep);
532 
533 	status = hxge_setup_dev(hxgep);
534 	if (status != DDI_SUCCESS) {
535 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set dev failed"));
536 		goto hxge_attach_fail;
537 	}
538 
539 	status = hxge_add_intrs(hxgep);
540 	if (status != DDI_SUCCESS) {
541 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "add_intr failed"));
542 		goto hxge_attach_fail;
543 	}
544 
545 	status = hxge_add_soft_intrs(hxgep);
546 	if (status != DDI_SUCCESS) {
547 		HXGE_DEBUG_MSG((hxgep, HXGE_ERR_CTL, "add_soft_intr failed"));
548 		goto hxge_attach_fail;
549 	}
550 
551 	/*
552 	 * Enable interrupts.
553 	 */
554 	hxge_intrs_enable(hxgep);
555 
556 	/* Keep copy of MSIx table written */
557 	hxge_store_msix_table(hxgep);
558 
559 	if ((status = hxge_mac_register(hxgep)) != HXGE_OK) {
560 		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
561 		    "unable to register to mac layer (%d)", status));
562 		goto hxge_attach_fail;
563 	}
564 	mac_link_update(hxgep->mach, LINK_STATE_UNKNOWN);
565 
566 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "registered to mac (instance %d)",
567 	    instance));
568 
569 	goto hxge_attach_exit;
570 
571 hxge_attach_fail:
572 	hxge_unattach(hxgep);
573 	goto hxge_attach_fail1;
574 
575 hxge_attach_fail5:
576 	/*
577 	 * Tear down the ndd parameters setup.
578 	 */
579 	hxge_destroy_param(hxgep);
580 
581 	/*
582 	 * Tear down the kstat setup.
583 	 */
584 	hxge_destroy_kstats(hxgep);
585 
586 hxge_attach_fail4:
587 	if (hxgep->hxge_hw_p) {
588 		hxge_uninit_common_dev(hxgep);
589 		hxgep->hxge_hw_p = NULL;
590 	}
591 hxge_attach_fail3:
592 	/*
593 	 * Unmap the register setup.
594 	 */
595 	hxge_unmap_regs(hxgep);
596 
597 	hxge_fm_fini(hxgep);
598 
599 hxge_attach_fail2:
600 	ddi_soft_state_free(hxge_list, hxgep->instance);
601 
602 hxge_attach_fail1:
603 	if (status != HXGE_OK)
604 		status = (HXGE_ERROR | HXGE_DDI_FAILED);
605 	hxgep = NULL;
606 
607 hxge_attach_exit:
608 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_attach status = 0x%08x",
609 	    status));
610 
611 	return (status);
612 }
613 
614 static int
615 hxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
616 {
617 	int		status = DDI_SUCCESS;
618 	int		instance;
619 	p_hxge_t	hxgep = NULL;
620 
621 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_detach"));
622 	instance = ddi_get_instance(dip);
623 	hxgep = ddi_get_soft_state(hxge_list, instance);
624 	if (hxgep == NULL) {
625 		status = DDI_FAILURE;
626 		goto hxge_detach_exit;
627 	}
628 
629 	switch (cmd) {
630 	case DDI_DETACH:
631 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_DETACH"));
632 		break;
633 
634 	case DDI_PM_SUSPEND:
635 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
636 		hxgep->suspended = DDI_PM_SUSPEND;
637 		hxge_suspend(hxgep);
638 		break;
639 
640 	case DDI_SUSPEND:
641 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_SUSPEND"));
642 		if (hxgep->suspended != DDI_PM_SUSPEND) {
643 			hxgep->suspended = DDI_SUSPEND;
644 			hxge_suspend(hxgep);
645 		}
646 		break;
647 
648 	default:
649 		status = DDI_FAILURE;
650 		break;
651 	}
652 
653 	if (cmd != DDI_DETACH)
654 		goto hxge_detach_exit;
655 
656 	/*
657 	 * Stop the xcvr polling.
658 	 */
659 	hxgep->suspended = cmd;
660 
661 	if (hxgep->mach && (status = mac_unregister(hxgep->mach)) != 0) {
662 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
663 		    "<== hxge_detach status = 0x%08X", status));
664 		return (DDI_FAILURE);
665 	}
666 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
667 	    "<== hxge_detach (mac_unregister) status = 0x%08X", status));
668 
669 	hxge_unattach(hxgep);
670 	hxgep = NULL;
671 
672 hxge_detach_exit:
673 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_detach status = 0x%08X",
674 	    status));
675 
676 	return (status);
677 }
678 
679 static void
680 hxge_unattach(p_hxge_t hxgep)
681 {
682 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unattach"));
683 
684 	if (hxgep == NULL || hxgep->dev_regs == NULL) {
685 		return;
686 	}
687 
688 	if (hxgep->hxge_hw_p) {
689 		hxge_uninit_common_dev(hxgep);
690 		hxgep->hxge_hw_p = NULL;
691 	}
692 
693 	if (hxgep->hxge_timerid) {
694 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
695 		hxgep->hxge_timerid = 0;
696 	}
697 
698 	/* Stop any further interrupts. */
699 	hxge_remove_intrs(hxgep);
700 
701 	/* Remove soft interrupts */
702 	hxge_remove_soft_intrs(hxgep);
703 
704 	/* Stop the device and free resources. */
705 	hxge_destroy_dev(hxgep);
706 
707 	/* Tear down the ndd parameters setup. */
708 	hxge_destroy_param(hxgep);
709 
710 	/* Tear down the kstat setup. */
711 	hxge_destroy_kstats(hxgep);
712 
713 	/*
714 	 * Remove the list of ndd parameters which were setup during attach.
715 	 */
716 	if (hxgep->dip) {
717 		HXGE_DEBUG_MSG((hxgep, OBP_CTL,
718 		    " hxge_unattach: remove all properties"));
719 		(void) ddi_prop_remove_all(hxgep->dip);
720 	}
721 
722 	/*
723 	 * Reset RDC, TDC, PFC, and VMAC blocks from PEU to clear any
724 	 * previous state before unmapping the registers.
725 	 */
726 	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, 0x0000001E);
727 	HXGE_DELAY(1000);
728 
729 	/*
730 	 * Unmap the register setup.
731 	 */
732 	hxge_unmap_regs(hxgep);
733 
734 	hxge_fm_fini(hxgep);
735 
736 	/* Destroy all mutexes.  */
737 	hxge_destroy_mutexes(hxgep);
738 
739 	/*
740 	 * Free the soft state data structures allocated with this instance.
741 	 */
742 	ddi_soft_state_free(hxge_list, hxgep->instance);
743 
744 	HXGE_DEBUG_MSG((NULL, DDI_CTL, "<== hxge_unattach"));
745 }
746 
747 static hxge_status_t
748 hxge_map_regs(p_hxge_t hxgep)
749 {
750 	int		ddi_status = DDI_SUCCESS;
751 	p_dev_regs_t	dev_regs;
752 
753 #ifdef	HXGE_DEBUG
754 	char		*sysname;
755 #endif
756 
757 	off_t		regsize;
758 	hxge_status_t	status = HXGE_OK;
759 	int		nregs;
760 
761 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_map_regs"));
762 
763 	if (ddi_dev_nregs(hxgep->dip, &nregs) != DDI_SUCCESS)
764 		return (HXGE_ERROR);
765 
766 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_regs: nregs: %d", nregs));
767 
768 	hxgep->dev_regs = NULL;
769 	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
770 	dev_regs->hxge_regh = NULL;
771 	dev_regs->hxge_pciregh = NULL;
772 	dev_regs->hxge_msix_regh = NULL;
773 
774 	(void) ddi_dev_regsize(hxgep->dip, 0, &regsize);
775 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
776 	    "hxge_map_regs: pci config size 0x%x", regsize));
777 
778 	ddi_status = ddi_regs_map_setup(hxgep->dip, 0,
779 	    (caddr_t *)&(dev_regs->hxge_pciregp), 0, 0,
780 	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_pciregh);
781 	if (ddi_status != DDI_SUCCESS) {
782 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
783 		    "ddi_map_regs, hxge bus config regs failed"));
784 		goto hxge_map_regs_fail0;
785 	}
786 
787 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
788 	    "hxge_map_reg: PCI config addr 0x%0llx handle 0x%0llx",
789 	    dev_regs->hxge_pciregp,
790 	    dev_regs->hxge_pciregh));
791 
792 	(void) ddi_dev_regsize(hxgep->dip, 1, &regsize);
793 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
794 	    "hxge_map_regs: pio size 0x%x", regsize));
795 
796 	/* set up the device mapped register */
797 	ddi_status = ddi_regs_map_setup(hxgep->dip, 1,
798 	    (caddr_t *)&(dev_regs->hxge_regp), 0, 0,
799 	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_regh);
800 
801 	if (ddi_status != DDI_SUCCESS) {
802 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
803 		    "ddi_map_regs for Hydra global reg failed"));
804 		goto hxge_map_regs_fail1;
805 	}
806 
807 	/* set up the msi/msi-x mapped register */
808 	(void) ddi_dev_regsize(hxgep->dip, 2, &regsize);
809 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
810 	    "hxge_map_regs: msix size 0x%x", regsize));
811 
812 	ddi_status = ddi_regs_map_setup(hxgep->dip, 2,
813 	    (caddr_t *)&(dev_regs->hxge_msix_regp), 0, 0,
814 	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_msix_regh);
815 
816 	if (ddi_status != DDI_SUCCESS) {
817 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
818 		    "ddi_map_regs for msi reg failed"));
819 		goto hxge_map_regs_fail2;
820 	}
821 
822 	hxgep->dev_regs = dev_regs;
823 
824 	HPI_PCI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_pciregh);
825 	HPI_PCI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_pciregp);
826 	HPI_MSI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_msix_regh);
827 	HPI_MSI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_msix_regp);
828 
829 	HPI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
830 	HPI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
831 
832 	HPI_REG_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
833 	HPI_REG_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
834 
835 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_reg: hardware addr 0x%0llx "
836 	    " handle 0x%0llx", dev_regs->hxge_regp, dev_regs->hxge_regh));
837 
838 	goto hxge_map_regs_exit;
839 
840 hxge_map_regs_fail3:
841 	if (dev_regs->hxge_msix_regh) {
842 		ddi_regs_map_free(&dev_regs->hxge_msix_regh);
843 	}
844 
845 hxge_map_regs_fail2:
846 	if (dev_regs->hxge_regh) {
847 		ddi_regs_map_free(&dev_regs->hxge_regh);
848 	}
849 
850 hxge_map_regs_fail1:
851 	if (dev_regs->hxge_pciregh) {
852 		ddi_regs_map_free(&dev_regs->hxge_pciregh);
853 	}
854 
855 hxge_map_regs_fail0:
856 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "Freeing register set memory"));
857 	kmem_free(dev_regs, sizeof (dev_regs_t));
858 
859 hxge_map_regs_exit:
860 	if (ddi_status != DDI_SUCCESS)
861 		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
862 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_map_regs"));
863 	return (status);
864 }
865 
866 static void
867 hxge_unmap_regs(p_hxge_t hxgep)
868 {
869 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unmap_regs"));
870 	if (hxgep->dev_regs) {
871 		if (hxgep->dev_regs->hxge_pciregh) {
872 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
873 			    "==> hxge_unmap_regs: bus"));
874 			ddi_regs_map_free(&hxgep->dev_regs->hxge_pciregh);
875 			hxgep->dev_regs->hxge_pciregh = NULL;
876 		}
877 
878 		if (hxgep->dev_regs->hxge_regh) {
879 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
880 			    "==> hxge_unmap_regs: device registers"));
881 			ddi_regs_map_free(&hxgep->dev_regs->hxge_regh);
882 			hxgep->dev_regs->hxge_regh = NULL;
883 		}
884 
885 		if (hxgep->dev_regs->hxge_msix_regh) {
886 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
887 			    "==> hxge_unmap_regs: device interrupts"));
888 			ddi_regs_map_free(&hxgep->dev_regs->hxge_msix_regh);
889 			hxgep->dev_regs->hxge_msix_regh = NULL;
890 		}
891 		kmem_free(hxgep->dev_regs, sizeof (dev_regs_t));
892 		hxgep->dev_regs = NULL;
893 	}
894 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_unmap_regs"));
895 }
896 
897 static hxge_status_t
898 hxge_setup_mutexes(p_hxge_t hxgep)
899 {
900 	int		ddi_status = DDI_SUCCESS;
901 	hxge_status_t	status = HXGE_OK;
902 
903 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_mutexes"));
904 
905 	/*
906 	 * Get the interrupt cookie so the mutexes can be initialized.
907 	 */
908 	ddi_status = ddi_get_iblock_cookie(hxgep->dip, 0,
909 	    &hxgep->interrupt_cookie);
910 
911 	if (ddi_status != DDI_SUCCESS) {
912 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
913 		    "<== hxge_setup_mutexes: failed 0x%x", ddi_status));
914 		goto hxge_setup_mutexes_exit;
915 	}
916 
917 	/*
918 	 * Initialize the mutexes for this device.
919 	 */
920 	MUTEX_INIT(hxgep->genlock, NULL,
921 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
922 	MUTEX_INIT(&hxgep->ouraddr_lock, NULL,
923 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
924 	RW_INIT(&hxgep->filter_lock, NULL,
925 	    RW_DRIVER, (void *) hxgep->interrupt_cookie);
926 	MUTEX_INIT(&hxgep->pio_lock, NULL,
927 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
928 	MUTEX_INIT(&hxgep->timeout.lock, NULL,
929 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
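	/*
	 * Passing the interrupt block cookie allows these locks to be
	 * acquired safely from the driver's interrupt handlers.
	 */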
930 
931 hxge_setup_mutexes_exit:
932 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
933 	    "<== hxge_setup_mutexes status = %x", status));
934 
935 	if (ddi_status != DDI_SUCCESS)
936 		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
937 
938 	return (status);
939 }
940 
941 static void
942 hxge_destroy_mutexes(p_hxge_t hxgep)
943 {
944 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_mutexes"));
945 	RW_DESTROY(&hxgep->filter_lock);
946 	MUTEX_DESTROY(&hxgep->ouraddr_lock);
947 	MUTEX_DESTROY(hxgep->genlock);
948 	MUTEX_DESTROY(&hxgep->pio_lock);
949 	MUTEX_DESTROY(&hxgep->timeout.lock);
950 
951 	if (hxge_debug_init == 1) {
952 		MUTEX_DESTROY(&hxgedebuglock);
953 		hxge_debug_init = 0;
954 	}
955 
956 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_mutexes"));
957 }
958 
959 hxge_status_t
960 hxge_init(p_hxge_t hxgep)
961 {
962 	hxge_status_t status = HXGE_OK;
963 
964 	HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_init"));
965 
966 	if (hxgep->drv_state & STATE_HW_INITIALIZED) {
967 		return (status);
968 	}
969 
970 	/*
971 	 * Allocate system memory for the receive/transmit buffer blocks and
972 	 * receive/transmit descriptor rings.
973 	 */
974 	status = hxge_alloc_mem_pool(hxgep);
975 	if (status != HXGE_OK) {
976 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "alloc mem failed\n"));
977 		goto hxge_init_fail1;
978 	}
979 
980 	/*
981 	 * Initialize and enable TXDMA channels.
982 	 */
983 	status = hxge_init_txdma_channels(hxgep);
984 	if (status != HXGE_OK) {
985 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init txdma failed\n"));
986 		goto hxge_init_fail3;
987 	}
988 
989 	/*
990 	 * Initialize and enable RXDMA channels.
991 	 */
992 	status = hxge_init_rxdma_channels(hxgep);
993 	if (status != HXGE_OK) {
994 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init rxdma failed\n"));
995 		goto hxge_init_fail4;
996 	}
997 
998 	/*
999 	 * Initialize TCAM
1000 	 */
1001 	status = hxge_classify_init(hxgep);
1002 	if (status != HXGE_OK) {
1003 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init classify failed\n"));
1004 		goto hxge_init_fail5;
1005 	}
1006 
1007 	/*
1008 	 * Initialize the VMAC block.
1009 	 */
1010 	status = hxge_vmac_init(hxgep);
1011 	if (status != HXGE_OK) {
1012 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init MAC failed\n"));
1013 		goto hxge_init_fail5;
1014 	}
1015 
1016 	/* Bringup - this may be unnecessary when PXE and FCode are available */
1017 	status = hxge_pfc_set_default_mac_addr(hxgep);
1018 	if (status != HXGE_OK) {
1019 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1020 		    "Default Address Failure\n"));
1021 		goto hxge_init_fail5;
1022 	}
1023 
1024 	hxge_intrs_enable(hxgep);
1025 
1026 	/* Keep copy of MSIx table written */
1027 	hxge_store_msix_table(hxgep);
1028 
1029 	/*
1030 	 * Enable hardware interrupts.
1031 	 */
1032 	hxge_intr_hw_enable(hxgep);
1033 	hxgep->drv_state |= STATE_HW_INITIALIZED;
1034 
1035 	goto hxge_init_exit;
1036 
1037 hxge_init_fail5:
1038 	hxge_uninit_rxdma_channels(hxgep);
1039 hxge_init_fail4:
1040 	hxge_uninit_txdma_channels(hxgep);
1041 hxge_init_fail3:
1042 	hxge_free_mem_pool(hxgep);
1043 hxge_init_fail1:
1044 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1045 	    "<== hxge_init status (failed) = 0x%08x", status));
1046 	return (status);
1047 
1048 hxge_init_exit:
1049 
1050 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_init status = 0x%08x",
1051 	    status));
1052 
1053 	return (status);
1054 }
1055 
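/*
 * Arm a driver timer only while the instance is running; if the device
 * is suspended, no timeout is scheduled and NULL is returned.
 */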
1056 timeout_id_t
1057 hxge_start_timer(p_hxge_t hxgep, fptrv_t func, int msec)
1058 {
1059 	if ((hxgep->suspended == 0) || (hxgep->suspended == DDI_RESUME)) {
1060 		return (timeout(func, (caddr_t)hxgep,
1061 		    drv_usectohz(1000 * msec)));
1062 	}
1063 	return (NULL);
1064 }
1065 
1066 /*ARGSUSED*/
1067 void
1068 hxge_stop_timer(p_hxge_t hxgep, timeout_id_t timerid)
1069 {
1070 	if (timerid) {
1071 		(void) untimeout(timerid);
1072 	}
1073 }
1074 
1075 void
1076 hxge_uninit(p_hxge_t hxgep)
1077 {
1078 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_uninit"));
1079 
1080 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
1081 		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1082 		    "==> hxge_uninit: not initialized"));
1083 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
1084 		return;
1085 	}
1086 
1087 	/* Stop timer */
1088 	if (hxgep->hxge_timerid) {
1089 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
1090 		hxgep->hxge_timerid = 0;
1091 	}
1092 
1093 	(void) hxge_intr_hw_disable(hxgep);
1094 
1095 	/* Reset the receive VMAC side.  */
1096 	(void) hxge_rx_vmac_disable(hxgep);
1097 
1098 	/* Free classification resources */
1099 	(void) hxge_classify_uninit(hxgep);
1100 
1101 	/* Reset the transmit/receive DMA side.  */
1102 	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
1103 	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);
1104 
1105 	hxge_uninit_txdma_channels(hxgep);
1106 	hxge_uninit_rxdma_channels(hxgep);
1107 
1108 	/* Reset the transmit VMAC side.  */
1109 	(void) hxge_tx_vmac_disable(hxgep);
1110 
1111 	hxge_free_mem_pool(hxgep);
1112 
1113 	hxgep->drv_state &= ~STATE_HW_INITIALIZED;
1114 
1115 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
1116 }
1117 
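/*
 * Debug ioctl helper: read a 64-bit register. The mblk's first 8 bytes
 * hold the register offset on entry and the value read on return.
 */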
1118 void
1119 hxge_get64(p_hxge_t hxgep, p_mblk_t mp)
1120 {
1121 #if defined(__i386)
1122 	size_t		reg;
1123 #else
1124 	uint64_t	reg;
1125 #endif
1126 	uint64_t	regdata;
1127 	int		i, retry;
1128 
1129 	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
1130 	regdata = 0;
1131 	retry = 1;
1132 
1133 	for (i = 0; i < retry; i++) {
1134 		HXGE_REG_RD64(hxgep->hpi_handle, reg, &regdata);
1135 	}
1136 	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
1137 }
1138 
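/*
 * Debug ioctl helper: write a 64-bit register. The mblk carries the
 * register offset in its first 8 bytes and the value to write in the
 * next 8 bytes.
 */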
1139 void
1140 hxge_put64(p_hxge_t hxgep, p_mblk_t mp)
1141 {
1142 #if defined(__i386)
1143 	size_t		reg;
1144 #else
1145 	uint64_t	reg;
1146 #endif
1147 	uint64_t	buf[2];
1148 
1149 	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
1150 #if defined(__i386)
1151 	reg = (size_t)buf[0];
1152 #else
1153 	reg = buf[0];
1154 #endif
1155 
1156 	HXGE_HPI_PIO_WRITE64(hxgep->hpi_handle, reg, buf[1]);
1157 }
1158 
1159 /*ARGSUSED*/
1160 /*VARARGS*/
1161 void
1162 hxge_debug_msg(p_hxge_t hxgep, uint64_t level, char *fmt, ...)
1163 {
1164 	char		msg_buffer[1048];
1165 	char		prefix_buffer[32];
1166 	int		instance;
1167 	uint64_t	debug_level;
1168 	int		cmn_level = CE_CONT;
1169 	va_list		ap;
1170 
1171 	debug_level = (hxgep == NULL) ? hxge_debug_level :
1172 	    hxgep->hxge_debug_level;
1173 
1174 	if ((level & debug_level) || (level == HXGE_NOTE) ||
1175 	    (level == HXGE_ERR_CTL)) {
1176 		/* do the msg processing */
1177 		if (hxge_debug_init == 0) {
1178 			MUTEX_INIT(&hxgedebuglock, NULL, MUTEX_DRIVER, NULL);
1179 			hxge_debug_init = 1;
1180 		}
1181 
1182 		MUTEX_ENTER(&hxgedebuglock);
1183 
1184 		if ((level & HXGE_NOTE)) {
1185 			cmn_level = CE_NOTE;
1186 		}
1187 
1188 		if (level & HXGE_ERR_CTL) {
1189 			cmn_level = CE_WARN;
1190 		}
1191 
1192 		va_start(ap, fmt);
1193 		(void) vsprintf(msg_buffer, fmt, ap);
1194 		va_end(ap);
1195 
1196 		if (hxgep == NULL) {
1197 			instance = -1;
1198 			(void) sprintf(prefix_buffer, "%s :", "hxge");
1199 		} else {
1200 			instance = hxgep->instance;
1201 			(void) sprintf(prefix_buffer,
1202 			    "%s%d :", "hxge", instance);
1203 		}
1204 
1205 		MUTEX_EXIT(&hxgedebuglock);
1206 		cmn_err(cmn_level, "%s %s\n", prefix_buffer, msg_buffer);
1207 	}
1208 }
1209 
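/*
 * Format up to MAX_DUMP_SZ bytes of a packet as colon-separated hex
 * digits in a static buffer; for larger packets, only the leading and
 * trailing MAX_DUMP_SZ/2 bytes are dumped, separated by a run of dots.
 */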
1210 char *
1211 hxge_dump_packet(char *addr, int size)
1212 {
1213 	uchar_t		*ap = (uchar_t *)addr;
1214 	int		i;
1215 	static char	etherbuf[1024];
1216 	char		*cp = etherbuf;
1217 	char		digits[] = "0123456789abcdef";
1218 
1219 	if (!size)
1220 		size = 60;
1221 
1222 	if (size > MAX_DUMP_SZ) {
1223 		/* Dump the leading bytes */
1224 		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1225 			if (*ap > 0x0f)
1226 				*cp++ = digits[*ap >> 4];
1227 			*cp++ = digits[*ap++ & 0xf];
1228 			*cp++ = ':';
1229 		}
1230 		for (i = 0; i < 20; i++)
1231 			*cp++ = '.';
1232 		/* Dump the last MAX_DUMP_SZ/2 bytes */
1233 		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
1234 		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1235 			if (*ap > 0x0f)
1236 				*cp++ = digits[*ap >> 4];
1237 			*cp++ = digits[*ap++ & 0xf];
1238 			*cp++ = ':';
1239 		}
1240 	} else {
1241 		for (i = 0; i < size; i++) {
1242 			if (*ap > 0x0f)
1243 				*cp++ = digits[*ap >> 4];
1244 			*cp++ = digits[*ap++ & 0xf];
1245 			*cp++ = ':';
1246 		}
1247 	}
1248 	*--cp = 0;
1249 	return (etherbuf);
1250 }
1251 
1252 static void
1253 hxge_suspend(p_hxge_t hxgep)
1254 {
1255 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_suspend"));
1256 
1257 	/*
1258 	 * Stop the link status timer before hxge_intrs_disable() to avoid
1259 	 * accessing the MSIX table simultaneously. Note that the timer
1260 	 * routine polls for MSIX parity errors.
1261 	 */
1262 	MUTEX_ENTER(&hxgep->timeout.lock);
1263 	if (hxgep->timeout.id)
1264 		(void) untimeout(hxgep->timeout.id);
1265 	MUTEX_EXIT(&hxgep->timeout.lock);
1266 
1267 	hxge_intrs_disable(hxgep);
1268 	hxge_destroy_dev(hxgep);
1269 
1270 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_suspend"));
1271 }
1272 
1273 static hxge_status_t
1274 hxge_resume(p_hxge_t hxgep)
1275 {
1276 	hxge_status_t status = HXGE_OK;
1277 
1278 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_resume"));
1279 	hxgep->suspended = DDI_RESUME;
1280 
1281 	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_START);
1282 	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_START);
1283 
1284 	(void) hxge_rx_vmac_enable(hxgep);
1285 	(void) hxge_tx_vmac_enable(hxgep);
1286 
1287 	hxge_intrs_enable(hxgep);
1288 
1289 	/* Keep copy of MSIx table written */
1290 	hxge_store_msix_table(hxgep);
1291 
1292 	hxgep->suspended = 0;
1293 
1294 	/*
1295 	 * Resume the link status timer after hxge_intrs_enable to avoid
1296 	 * accessing MSIX table simultaneously.
1297 	 */
1298 	MUTEX_ENTER(&hxgep->timeout.lock);
1299 	hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
1300 	    hxgep->timeout.ticks);
1301 	MUTEX_EXIT(&hxgep->timeout.lock);
1302 
1303 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1304 	    "<== hxge_resume status = 0x%x", status));
1305 
1306 	return (status);
1307 }
1308 
1309 hxge_status_t
1310 hxge_setup_dev(p_hxge_t hxgep)
1311 {
1312 	hxge_status_t status = HXGE_OK;
1313 
1314 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_dev"));
1315 
1316 	status = hxge_link_init(hxgep);
1317 	if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) {
1318 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1319 		    "Bad register acc handle"));
1320 		status = HXGE_ERROR;
1321 	}
1322 
1323 	if (status != HXGE_OK) {
1324 		HXGE_DEBUG_MSG((hxgep, MAC_CTL,
1325 		    " hxge_setup_dev status (link init 0x%08x)", status));
1326 		goto hxge_setup_dev_exit;
1327 	}
1328 
1329 hxge_setup_dev_exit:
1330 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1331 	    "<== hxge_setup_dev status = 0x%08x", status));
1332 
1333 	return (status);
1334 }
1335 
1336 static void
1337 hxge_destroy_dev(p_hxge_t hxgep)
1338 {
1339 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_dev"));
1340 
1341 	(void) hxge_hw_stop(hxgep);
1342 
1343 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_dev"));
1344 }
1345 
1346 static hxge_status_t
1347 hxge_setup_system_dma_pages(p_hxge_t hxgep)
1348 {
1349 	int			ddi_status = DDI_SUCCESS;
1350 	uint_t			count;
1351 	ddi_dma_cookie_t	cookie;
1352 	uint_t			iommu_pagesize;
1353 	hxge_status_t		status = HXGE_OK;
1354 
1355 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_system_dma_pages"));
1356 
1357 	hxgep->sys_page_sz = ddi_ptob(hxgep->dip, (ulong_t)1);
1358 	iommu_pagesize = dvma_pagesize(hxgep->dip);
1359 
1360 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1361 	    " hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1362 	    " default_block_size %d iommu_pagesize %d",
1363 	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1364 	    hxgep->rx_default_block_size, iommu_pagesize));
1365 
1366 	if (iommu_pagesize != 0) {
1367 		if (hxgep->sys_page_sz == iommu_pagesize) {
1368 			/* Hydra supports pages of up to 8K */
1369 			if (iommu_pagesize > 0x2000)
1370 				hxgep->sys_page_sz = 0x2000;
1371 		} else {
1372 			if (hxgep->sys_page_sz > iommu_pagesize)
1373 				hxgep->sys_page_sz = iommu_pagesize;
1374 		}
1375 	}
1376 
1377 	hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1378 
1379 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1380 	    "==> hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1381 	    "default_block_size %d page mask %d",
1382 	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1383 	    hxgep->rx_default_block_size, hxgep->sys_page_mask));
1384 
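	/*
	 * Map the (possibly clamped) system page size to the receive
	 * block size and RBR block-size code; any size other than 4K
	 * or 8K falls back to 4K.
	 */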
1385 	switch (hxgep->sys_page_sz) {
1386 	default:
1387 		hxgep->sys_page_sz = 0x1000;
1388 		hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1389 		hxgep->rx_default_block_size = 0x1000;
1390 		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1391 		break;
1392 	case 0x1000:
1393 		hxgep->rx_default_block_size = 0x1000;
1394 		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1395 		break;
1396 	case 0x2000:
1397 		hxgep->rx_default_block_size = 0x2000;
1398 		hxgep->rx_bksize_code = RBR_BKSIZE_8K;
1399 		break;
1400 	}
1401 
1402 	hxge_rx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1403 	hxge_tx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1404 
1405 	/*
1406 	 * Get the system DMA burst size.
1407 	 */
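	/*
	 * A throwaway handle is bound to its own storage purely so that
	 * ddi_dma_burstsizes() can report the burst sizes the system
	 * supports; the handle is then unbound and freed.
	 */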
1408 	ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr,
1409 	    DDI_DMA_DONTWAIT, 0, &hxgep->dmasparehandle);
1410 	if (ddi_status != DDI_SUCCESS) {
1411 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1412 		    "ddi_dma_alloc_handle: failed status 0x%x", ddi_status));
1413 		goto hxge_get_soft_properties_exit;
1414 	}
1415 
1416 	ddi_status = ddi_dma_addr_bind_handle(hxgep->dmasparehandle, NULL,
1417 	    (caddr_t)hxgep->dmasparehandle, sizeof (hxgep->dmasparehandle),
1418 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
1419 	    &cookie, &count);
1420 	if (ddi_status != DDI_DMA_MAPPED) {
1421 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1422 		    "Binding spare handle to find system burstsize failed."));
1423 		ddi_status = DDI_FAILURE;
1424 		goto hxge_get_soft_properties_fail1;
1425 	}
1426 
1427 	hxgep->sys_burst_sz = ddi_dma_burstsizes(hxgep->dmasparehandle);
1428 	(void) ddi_dma_unbind_handle(hxgep->dmasparehandle);
1429 
1430 hxge_get_soft_properties_fail1:
1431 	ddi_dma_free_handle(&hxgep->dmasparehandle);
1432 
1433 hxge_get_soft_properties_exit:
1434 
1435 	if (ddi_status != DDI_SUCCESS)
1436 		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
1437 
1438 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1439 	    "<== hxge_setup_system_dma_pages status = 0x%08x", status));
1440 
1441 	return (status);
1442 }
1443 
1444 hxge_status_t
1445 hxge_alloc_mem_pool(p_hxge_t hxgep)
1446 {
1447 	hxge_status_t status = HXGE_OK;
1448 
1449 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_alloc_mem_pool"));
1450 
1451 	status = hxge_alloc_rx_mem_pool(hxgep);
1452 	if (status != HXGE_OK) {
1453 		return (HXGE_ERROR);
1454 	}
1455 
1456 	status = hxge_alloc_tx_mem_pool(hxgep);
1457 	if (status != HXGE_OK) {
1458 		hxge_free_rx_mem_pool(hxgep);
1459 		return (HXGE_ERROR);
1460 	}
1461 
1462 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_alloc_mem_pool"));
1463 	return (HXGE_OK);
1464 }
1465 
1466 static void
1467 hxge_free_mem_pool(p_hxge_t hxgep)
1468 {
1469 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_mem_pool"));
1470 
1471 	hxge_free_rx_mem_pool(hxgep);
1472 	hxge_free_tx_mem_pool(hxgep);
1473 
1474 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_mem_pool"));
1475 }
1476 
1477 static hxge_status_t
1478 hxge_alloc_rx_mem_pool(p_hxge_t hxgep)
1479 {
1480 	int			i, j;
1481 	uint32_t		ndmas, st_rdc;
1482 	p_hxge_dma_pt_cfg_t	p_all_cfgp;
1483 	p_hxge_hw_pt_cfg_t	p_cfgp;
1484 	p_hxge_dma_pool_t	dma_poolp;
1485 	p_hxge_dma_common_t	*dma_buf_p;
1486 	p_hxge_dma_pool_t	dma_rbr_cntl_poolp;
1487 	p_hxge_dma_common_t	*dma_rbr_cntl_p;
1488 	p_hxge_dma_pool_t	dma_rcr_cntl_poolp;
1489 	p_hxge_dma_common_t	*dma_rcr_cntl_p;
1490 	p_hxge_dma_pool_t	dma_mbox_cntl_poolp;
1491 	p_hxge_dma_common_t	*dma_mbox_cntl_p;
1492 	size_t			rx_buf_alloc_size;
1493 	size_t			rx_rbr_cntl_alloc_size;
1494 	size_t			rx_rcr_cntl_alloc_size;
1495 	size_t			rx_mbox_cntl_alloc_size;
1496 	uint32_t		*num_chunks;	/* per dma */
1497 	hxge_status_t		status = HXGE_OK;
1498 
1499 	uint32_t		hxge_port_rbr_size;
1500 	uint32_t		hxge_port_rbr_spare_size;
1501 	uint32_t		hxge_port_rcr_size;
1502 
1503 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_mem_pool"));
1504 
1505 	p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1506 	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1507 	st_rdc = p_cfgp->start_rdc;
1508 	ndmas = p_cfgp->max_rdcs;
1509 
1510 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1511 	    " hxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));
1512 
1513 	/*
1514 	 * Allocate memory for each receive DMA channel.
1515 	 */
1516 	dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1517 	    KM_SLEEP);
1518 	dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1519 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1520 
1521 	dma_rbr_cntl_poolp = (p_hxge_dma_pool_t)
1522 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1523 	dma_rbr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1524 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1525 	dma_rcr_cntl_poolp = (p_hxge_dma_pool_t)
1526 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1527 	dma_rcr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1528 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1529 	dma_mbox_cntl_poolp = (p_hxge_dma_pool_t)
1530 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1531 	dma_mbox_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1532 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1533 
1534 	num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
1535 	    KM_SLEEP);
1536 
1537 	/*
1538 	 * Assume that each DMA channel will be configured with default block
1539 	 * size. RBR block counts must be a multiple of the batch count (16).
1540 	 */
1541 	hxge_port_rbr_size = p_all_cfgp->rbr_size;
1542 	hxge_port_rcr_size = p_all_cfgp->rcr_size;
1543 
1544 	if (!hxge_port_rbr_size) {
1545 		hxge_port_rbr_size = HXGE_RBR_RBB_DEFAULT;
1546 	}
1547 
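	/*
	 * Round the RBR size up to a multiple of the post batch count;
	 * e.g. with a batch count of 16, a requested size of 1000 is
	 * rounded up to 63 * 16 = 1008.
	 */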
1548 	if (hxge_port_rbr_size % HXGE_RXDMA_POST_BATCH) {
1549 		hxge_port_rbr_size = (HXGE_RXDMA_POST_BATCH *
1550 		    (hxge_port_rbr_size / HXGE_RXDMA_POST_BATCH + 1));
1551 	}
1552 
1553 	p_all_cfgp->rbr_size = hxge_port_rbr_size;
1554 	hxge_port_rbr_spare_size = hxge_rbr_spare_size;
1555 
1556 	if (hxge_port_rbr_spare_size % HXGE_RXDMA_POST_BATCH) {
1557 		hxge_port_rbr_spare_size = (HXGE_RXDMA_POST_BATCH *
1558 		    (hxge_port_rbr_spare_size / HXGE_RXDMA_POST_BATCH + 1));
1559 	}
1560 
1561 	rx_buf_alloc_size = (hxgep->rx_default_block_size *
1562 	    (hxge_port_rbr_size + hxge_port_rbr_spare_size));
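	/*
	 * One block per RBR entry (plus spares) is needed; e.g. a 4 KB
	 * block size with a 2048-entry ring would require 8 MB.
	 */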
1563 
1564 	/*
1565 	 * Addresses of receive block ring, receive completion ring and the
1566 	 * mailbox must be all cache-aligned (64 bytes).
1567 	 */
1568 	rx_rbr_cntl_alloc_size = hxge_port_rbr_size + hxge_port_rbr_spare_size;
1569 	rx_rbr_cntl_alloc_size *= sizeof (rx_desc_t);
1570 	rx_rcr_cntl_alloc_size = sizeof (rcr_entry_t) * hxge_port_rcr_size;
1571 	rx_mbox_cntl_alloc_size = sizeof (rxdma_mailbox_t);
1572 
1573 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_alloc_rx_mem_pool: "
1574 	    "hxge_port_rbr_size = %d hxge_port_rbr_spare_size = %d "
1575 	    "hxge_port_rcr_size = %d rx_rbr_cntl_alloc_size = %d",
1576 	    hxge_port_rbr_size, hxge_port_rbr_spare_size,
1577 	    hxge_port_rcr_size, rx_rbr_cntl_alloc_size));
1578 
1579 	hxgep->hxge_port_rbr_size = hxge_port_rbr_size;
1580 	hxgep->hxge_port_rcr_size = hxge_port_rcr_size;
1581 
1582 	/*
1583 	 * Allocate memory for receive buffers and descriptor rings. Replace
1584 	 * allocation functions with interface functions provided by the
1585 	 * partition manager when it is available.
1586 	 */
1587 	/*
1588 	 * Allocate memory for the receive buffer blocks.
1589 	 */
1590 	for (i = 0; i < ndmas; i++) {
1591 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1592 		    " hxge_alloc_rx_mem_pool to alloc mem: "
1593 		    " dma %d dma_buf_p %llx &dma_buf_p %llx",
1594 		    i, dma_buf_p[i], &dma_buf_p[i]));
1595 
1596 		num_chunks[i] = 0;
1597 
1598 		status = hxge_alloc_rx_buf_dma(hxgep, st_rdc, &dma_buf_p[i],
1599 		    rx_buf_alloc_size, hxgep->rx_default_block_size,
1600 		    &num_chunks[i]);
1601 		if (status != HXGE_OK) {
1602 			break;
1603 		}
1604 
1605 		st_rdc++;
1606 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1607 		    " hxge_alloc_rx_mem_pool DONE  alloc mem: "
1608 		    "dma %d dma_buf_p %llx &dma_buf_p %llx", i,
1609 		    dma_buf_p[i], &dma_buf_p[i]));
1610 	}
1611 
1612 	if (i < ndmas) {
1613 		goto hxge_alloc_rx_mem_fail1;
1614 	}
1615 
1616 	/*
1617 	 * Allocate memory for descriptor rings and mailbox.
1618 	 */
1619 	st_rdc = p_cfgp->start_rdc;
1620 	for (j = 0; j < ndmas; j++) {
1621 		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1622 		    &dma_rbr_cntl_p[j], &hxge_rx_rbr_desc_dma_attr,
1623 		    rx_rbr_cntl_alloc_size)) != HXGE_OK) {
1624 			break;
1625 		}
1626 
1627 		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1628 		    &dma_rcr_cntl_p[j], &hxge_rx_rcr_desc_dma_attr,
1629 		    rx_rcr_cntl_alloc_size)) != HXGE_OK) {
1630 			break;
1631 		}
1632 
1633 		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1634 		    &dma_mbox_cntl_p[j], &hxge_rx_mbox_dma_attr,
1635 		    rx_mbox_cntl_alloc_size)) != HXGE_OK) {
1636 			break;
1637 		}
1638 		st_rdc++;
1639 	}
1640 
1641 	if (j < ndmas) {
1642 		goto hxge_alloc_rx_mem_fail2;
1643 	}
1644 
1645 	dma_poolp->ndmas = ndmas;
1646 	dma_poolp->num_chunks = num_chunks;
1647 	dma_poolp->buf_allocated = B_TRUE;
1648 	hxgep->rx_buf_pool_p = dma_poolp;
1649 	dma_poolp->dma_buf_pool_p = dma_buf_p;
1650 
1651 	dma_rbr_cntl_poolp->ndmas = ndmas;
1652 	dma_rbr_cntl_poolp->buf_allocated = B_TRUE;
1653 	hxgep->rx_rbr_cntl_pool_p = dma_rbr_cntl_poolp;
1654 	dma_rbr_cntl_poolp->dma_buf_pool_p = dma_rbr_cntl_p;
1655 
1656 	dma_rcr_cntl_poolp->ndmas = ndmas;
1657 	dma_rcr_cntl_poolp->buf_allocated = B_TRUE;
1658 	hxgep->rx_rcr_cntl_pool_p = dma_rcr_cntl_poolp;
1659 	dma_rcr_cntl_poolp->dma_buf_pool_p = dma_rcr_cntl_p;
1660 
1661 	dma_mbox_cntl_poolp->ndmas = ndmas;
1662 	dma_mbox_cntl_poolp->buf_allocated = B_TRUE;
1663 	hxgep->rx_mbox_cntl_pool_p = dma_mbox_cntl_poolp;
1664 	dma_mbox_cntl_poolp->dma_buf_pool_p = dma_mbox_cntl_p;
1665 
1666 	goto hxge_alloc_rx_mem_pool_exit;
1667 
1668 hxge_alloc_rx_mem_fail2:
1669 	/* Free control buffers */
1670 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1671 	    "==> hxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
1672 	for (; j >= 0; j--) {
1673 		hxge_free_rx_cntl_dma(hxgep,
1674 		    (p_hxge_dma_common_t)dma_rbr_cntl_p[j]);
1675 		hxge_free_rx_cntl_dma(hxgep,
1676 		    (p_hxge_dma_common_t)dma_rcr_cntl_p[j]);
1677 		hxge_free_rx_cntl_dma(hxgep,
1678 		    (p_hxge_dma_common_t)dma_mbox_cntl_p[j]);
1679 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1680 		    "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1681 	}
1682 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1683 	    "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1684 
1685 hxge_alloc_rx_mem_fail1:
1686 	/* Free data buffers */
1687 	i--;
1688 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1689 	    "==> hxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
1690 	for (; i >= 0; i--) {
1691 		hxge_free_rx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
1692 		    num_chunks[i]);
1693 	}
1694 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1695 	    "==> hxge_alloc_rx_mem_pool: data bufs freed (%d)", i));
1696 
1697 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1698 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1699 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1700 	KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
1701 	KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1702 	KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
1703 	KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1704 	KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
1705 	KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1706 
1707 hxge_alloc_rx_mem_pool_exit:
1708 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1709 	    "<== hxge_alloc_rx_mem_pool:status 0x%08x", status));
1710 
1711 	return (status);
1712 }
1713 
1714 static void
1715 hxge_free_rx_mem_pool(p_hxge_t hxgep)
1716 {
1717 	uint32_t		i, ndmas;
1718 	p_hxge_dma_pool_t	dma_poolp;
1719 	p_hxge_dma_common_t	*dma_buf_p;
1720 	p_hxge_dma_pool_t	dma_rbr_cntl_poolp;
1721 	p_hxge_dma_common_t	*dma_rbr_cntl_p;
1722 	p_hxge_dma_pool_t	dma_rcr_cntl_poolp;
1723 	p_hxge_dma_common_t	*dma_rcr_cntl_p;
1724 	p_hxge_dma_pool_t	dma_mbox_cntl_poolp;
1725 	p_hxge_dma_common_t	*dma_mbox_cntl_p;
1726 	uint32_t		*num_chunks;
1727 
1728 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_free_rx_mem_pool"));
1729 
1730 	dma_poolp = hxgep->rx_buf_pool_p;
1731 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
1732 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool "
1733 		    "(null rx buf pool or buf not allocated)"));
1734 		return;
1735 	}
1736 
1737 	dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p;
1738 	if (dma_rbr_cntl_poolp == NULL ||
1739 	    (!dma_rbr_cntl_poolp->buf_allocated)) {
1740 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1741 		    "<== hxge_free_rx_mem_pool "
1742 		    "(null rbr cntl buf pool or rbr cntl buf not allocated)"));
1743 		return;
1744 	}
1745 
1746 	dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p;
1747 	if (dma_rcr_cntl_poolp == NULL ||
1748 	    (!dma_rcr_cntl_poolp->buf_allocated)) {
1749 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1750 		    "<== hxge_free_rx_mem_pool "
1751 		    "(null rcr cntl buf pool or rcr cntl buf not allocated)"));
1752 		return;
1753 	}
1754 
1755 	dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p;
1756 	if (dma_mbox_cntl_poolp == NULL ||
1757 	    (!dma_mbox_cntl_poolp->buf_allocated)) {
1758 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1759 		    "<== hxge_free_rx_mem_pool "
1760 		    "(null mbox cntl buf pool or mbox cntl buf not allocated)"));
1761 		return;
1762 	}
1763 
1764 	dma_buf_p = dma_poolp->dma_buf_pool_p;
1765 	num_chunks = dma_poolp->num_chunks;
1766 
1767 	dma_rbr_cntl_p = dma_rbr_cntl_poolp->dma_buf_pool_p;
1768 	dma_rcr_cntl_p = dma_rcr_cntl_poolp->dma_buf_pool_p;
1769 	dma_mbox_cntl_p = dma_mbox_cntl_poolp->dma_buf_pool_p;
1770 	ndmas = dma_rbr_cntl_poolp->ndmas;
1771 
1772 	for (i = 0; i < ndmas; i++) {
1773 		hxge_free_rx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
1774 	}
1775 
1776 	for (i = 0; i < ndmas; i++) {
1777 		hxge_free_rx_cntl_dma(hxgep, dma_rbr_cntl_p[i]);
1778 		hxge_free_rx_cntl_dma(hxgep, dma_rcr_cntl_p[i]);
1779 		hxge_free_rx_cntl_dma(hxgep, dma_mbox_cntl_p[i]);
1780 	}
1781 
1782 	for (i = 0; i < ndmas; i++) {
1783 		KMEM_FREE(dma_buf_p[i],
1784 		    sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1785 		KMEM_FREE(dma_rbr_cntl_p[i], sizeof (hxge_dma_common_t));
1786 		KMEM_FREE(dma_rcr_cntl_p[i], sizeof (hxge_dma_common_t));
1787 		KMEM_FREE(dma_mbox_cntl_p[i], sizeof (hxge_dma_common_t));
1788 	}
1789 
1790 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1791 	KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1792 	KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
1793 	KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1794 	KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
1795 	KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1796 	KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
1797 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1798 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1799 
1800 	hxgep->rx_buf_pool_p = NULL;
1801 	hxgep->rx_rbr_cntl_pool_p = NULL;
1802 	hxgep->rx_rcr_cntl_pool_p = NULL;
1803 	hxgep->rx_mbox_cntl_pool_p = NULL;
1804 
1805 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool"));
1806 }
1807 
1808 static hxge_status_t
1809 hxge_alloc_rx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
1810     p_hxge_dma_common_t *dmap,
1811     size_t alloc_size, size_t block_size, uint32_t *num_chunks)
1812 {
1813 	p_hxge_dma_common_t	rx_dmap;
1814 	hxge_status_t		status = HXGE_OK;
1815 	size_t			total_alloc_size;
1816 	size_t			allocated = 0;
1817 	int			i, size_index, array_size;
1818 
1819 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_buf_dma"));
1820 
1821 	rx_dmap = (p_hxge_dma_common_t)
1822 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
1823 
1824 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1825 	    " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
1826 	    dma_channel, alloc_size, block_size, dmap));
1827 
1828 	total_alloc_size = alloc_size;
1829 
1830 	i = 0;
1831 	size_index = 0;
1832 	array_size = sizeof (alloc_sizes) / sizeof (size_t);
1833 	while ((size_index < array_size) &&
1834 	    (alloc_sizes[size_index] < alloc_size))
1835 		size_index++;
1836 	if (size_index >= array_size) {
1837 		size_index = array_size - 1;
1838 	}
1839 
1840 	while ((allocated < total_alloc_size) &&
1841 	    (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
1842 		rx_dmap[i].dma_chunk_index = i;
1843 		rx_dmap[i].block_size = block_size;
1844 		rx_dmap[i].alength = alloc_sizes[size_index];
1845 		rx_dmap[i].orig_alength = rx_dmap[i].alength;
1846 		rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
1847 		rx_dmap[i].dma_channel = dma_channel;
1848 		rx_dmap[i].contig_alloc_type = B_FALSE;
1849 
1850 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1851 		    "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
1852 		    "i %d nblocks %d alength %d",
1853 		    dma_channel, i, &rx_dmap[i], block_size,
1854 		    i, rx_dmap[i].nblocks, rx_dmap[i].alength));
1855 		status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1856 		    &hxge_rx_dma_attr, rx_dmap[i].alength,
1857 		    &hxge_dev_buf_dma_acc_attr,
1858 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1859 		    (p_hxge_dma_common_t)(&rx_dmap[i]));
1860 		if (status != HXGE_OK) {
1861 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1862 			    " hxge_alloc_rx_buf_dma: Alloc Failed: "
1863 			    " for size: %d", alloc_sizes[size_index]));
1864 			size_index--;
1865 		} else {
1866 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1867 			    " alloc_rx_buf_dma allocated rdc %d "
1868 			    "chunk %d size %x dvma %x bufp %llx ",
1869 			    dma_channel, i, rx_dmap[i].alength,
1870 			    rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
1871 			i++;
1872 			allocated += alloc_sizes[size_index];
1873 		}
1874 	}
1875 
1876 	if (allocated < total_alloc_size) {
1877 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1878 		    " hxge_alloc_rx_buf_dma failed due to"
1879 		    " allocated(%d) < required(%d)",
1880 		    allocated, total_alloc_size));
1881 		goto hxge_alloc_rx_mem_fail1;
1882 	}
1883 
1884 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1885 	    " alloc_rx_buf_dma rdc %d allocated %d chunks", dma_channel, i));
1886 
1887 	*num_chunks = i;
1888 	*dmap = rx_dmap;
1889 
1890 	goto hxge_alloc_rx_mem_exit;
1891 
1892 hxge_alloc_rx_mem_fail1:
1893 	KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1894 
1895 hxge_alloc_rx_mem_exit:
1896 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1897 	    "<== hxge_alloc_rx_buf_dma status 0x%08x", status));
1898 
1899 	return (status);
1900 }
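
/*
 * Editor's sketch: the allocation loop above starts at the smallest
 * alloc_sizes[] entry that covers the request and steps down to smaller
 * chunks whenever hxge_dma_mem_alloc() fails.  The stand-alone sketch
 * below shows the same strategy with the DDI details stripped out;
 * try_alloc() is a hypothetical stand-in for hxge_dma_mem_alloc() and
 * is not part of this driver.
 */
#if 0	/* illustrative only, not compiled */
static size_t
sketch_chunked_alloc(const size_t *sizes, int nsizes, size_t request,
    int max_chunks, int (*try_alloc)(size_t))
{
	size_t	allocated = 0;
	int	nchunks = 0, idx = 0;

	/* Smallest size that covers the whole request in one chunk. */
	while (idx < nsizes && sizes[idx] < request)
		idx++;
	if (idx >= nsizes)
		idx = nsizes - 1;

	/* On each failure, retry with the next smaller chunk size. */
	while (allocated < request && idx >= 0 && nchunks < max_chunks) {
		if (try_alloc(sizes[idx]) == 0) {
			allocated += sizes[idx];
			nchunks++;
		} else {
			idx--;
		}
	}
	return (allocated);	/* caller fails if allocated < request */
}
#endif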
1901 
1902 /*ARGSUSED*/
1903 static void
1904 hxge_free_rx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
1905     uint32_t num_chunks)
1906 {
1907 	int i;
1908 
1909 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1910 	    "==> hxge_free_rx_buf_dma: # of chunks %d", num_chunks));
1911 
1912 	for (i = 0; i < num_chunks; i++) {
1913 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1914 		    "==> hxge_free_rx_buf_dma: chunk %d dmap 0x%llx", i, dmap));
1915 		hxge_dma_mem_free(dmap++);
1916 	}
1917 
1918 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_buf_dma"));
1919 }
1920 
1921 /*ARGSUSED*/
1922 static hxge_status_t
1923 hxge_alloc_rx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
1924     p_hxge_dma_common_t *dmap, struct ddi_dma_attr *attr, size_t size)
1925 {
1926 	p_hxge_dma_common_t	rx_dmap;
1927 	hxge_status_t		status = HXGE_OK;
1928 
1929 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_cntl_dma"));
1930 
1931 	rx_dmap = (p_hxge_dma_common_t)
1932 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t), KM_SLEEP);
1933 
1934 	rx_dmap->contig_alloc_type = B_FALSE;
1935 
1936 	status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1937 	    attr, size, &hxge_dev_desc_dma_acc_attr,
1938 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, rx_dmap);
1939 	if (status != HXGE_OK) {
1940 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1941 		    " hxge_alloc_rx_cntl_dma: Alloc Failed: "
1942 		    " for size: %d", size));
1943 		goto hxge_alloc_rx_cntl_dma_fail1;
1944 	}
1945 
1946 	*dmap = rx_dmap;
1947 
1948 	goto hxge_alloc_rx_cntl_dma_exit;
1949 
1950 hxge_alloc_rx_cntl_dma_fail1:
1951 	KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t));
1952 
1953 hxge_alloc_rx_cntl_dma_exit:
1954 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1955 	    "<== hxge_alloc_rx_cntl_dma status 0x%08x", status));
1956 
1957 	return (status);
1958 }
1959 
1960 /*ARGSUSED*/
1961 static void
1962 hxge_free_rx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
1963 {
1964 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_rx_cntl_dma"));
1965 
1966 	hxge_dma_mem_free(dmap);
1967 
1968 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_rx_cntl_dma"));
1969 }
1970 
1971 static hxge_status_t
1972 hxge_alloc_tx_mem_pool(p_hxge_t hxgep)
1973 {
1974 	hxge_status_t		status = HXGE_OK;
1975 	int			i, j;
1976 	uint32_t		ndmas, st_tdc;
1977 	p_hxge_dma_pt_cfg_t	p_all_cfgp;
1978 	p_hxge_hw_pt_cfg_t	p_cfgp;
1979 	p_hxge_dma_pool_t	dma_poolp;
1980 	p_hxge_dma_common_t	*dma_buf_p;
1981 	p_hxge_dma_pool_t	dma_cntl_poolp;
1982 	p_hxge_dma_common_t	*dma_cntl_p;
1983 	size_t			tx_buf_alloc_size;
1984 	size_t			tx_cntl_alloc_size;
1985 	uint32_t		*num_chunks;	/* per dma */
1986 
1987 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool"));
1988 
1989 	p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1990 	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1991 	st_tdc = p_cfgp->start_tdc;
1992 	ndmas = p_cfgp->max_tdcs;
1993 
1994 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool: "
1995 	    "p_cfgp 0x%016llx start_tdc %d ndmas %d hxgep->max_tdcs %d",
1996 	    p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, hxgep->max_tdcs));
1997 	/*
1998 	 * Allocate memory for each transmit DMA channel.
1999 	 */
2000 	dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
2001 	    KM_SLEEP);
2002 	dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
2003 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
2004 
2005 	dma_cntl_poolp = (p_hxge_dma_pool_t)
2006 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
2007 	dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
2008 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
2009 
2010 	hxgep->hxge_port_tx_ring_size = hxge_tx_ring_size;
2011 
2012 	/*
2013 	 * Assume that each DMA channel will be configured with the default
2014 	 * transmit buffer size for copying transmit data. (Packets whose
2015 	 * payloads exceed this limit are not copied.)
2016 	 */
2017 	tx_buf_alloc_size = (hxge_bcopy_thresh * hxge_tx_ring_size);
2018 
2019 	/*
2020 	 * Addresses of transmit descriptor ring and the mailbox must be all
2021 	 * cache-aligned (64 bytes).
2022 	 */
2023 	tx_cntl_alloc_size = hxge_tx_ring_size;
2024 	tx_cntl_alloc_size *= (sizeof (tx_desc_t));
2025 	tx_cntl_alloc_size += sizeof (txdma_mailbox_t);
2026 
2027 	num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
2028 	    KM_SLEEP);
2029 
2030 	/*
2031 	 * Allocate memory for transmit buffers and descriptor rings. Replace
2032 	 * allocation functions with interface functions provided by the
2033 	 * partition manager when it is available.
2034 	 *
2035 	 * Allocate memory for the transmit buffer pool.
2036 	 */
2037 	for (i = 0; i < ndmas; i++) {
2038 		num_chunks[i] = 0;
2039 		status = hxge_alloc_tx_buf_dma(hxgep, st_tdc, &dma_buf_p[i],
2040 		    tx_buf_alloc_size, hxge_bcopy_thresh, &num_chunks[i]);
2041 		if (status != HXGE_OK) {
2042 			break;
2043 		}
2044 		st_tdc++;
2045 	}
2046 
2047 	if (i < ndmas) {
2048 		goto hxge_alloc_tx_mem_pool_fail1;
2049 	}
2050 
2051 	st_tdc = p_cfgp->start_tdc;
2052 
2053 	/*
2054 	 * Allocate memory for descriptor rings and mailbox.
2055 	 */
2056 	for (j = 0; j < ndmas; j++) {
2057 		status = hxge_alloc_tx_cntl_dma(hxgep, st_tdc, &dma_cntl_p[j],
2058 		    tx_cntl_alloc_size);
2059 		if (status != HXGE_OK) {
2060 			break;
2061 		}
2062 		st_tdc++;
2063 	}
2064 
2065 	if (j < ndmas) {
2066 		goto hxge_alloc_tx_mem_pool_fail2;
2067 	}
2068 
2069 	dma_poolp->ndmas = ndmas;
2070 	dma_poolp->num_chunks = num_chunks;
2071 	dma_poolp->buf_allocated = B_TRUE;
2072 	dma_poolp->dma_buf_pool_p = dma_buf_p;
2073 	hxgep->tx_buf_pool_p = dma_poolp;
2074 
2075 	dma_cntl_poolp->ndmas = ndmas;
2076 	dma_cntl_poolp->buf_allocated = B_TRUE;
2077 	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2078 	hxgep->tx_cntl_pool_p = dma_cntl_poolp;
2079 
2080 	HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2081 	    "==> hxge_alloc_tx_mem_pool: start_tdc %d "
2082 	    "ndmas %d poolp->ndmas %d", st_tdc, ndmas, dma_poolp->ndmas));
2083 
2084 	goto hxge_alloc_tx_mem_pool_exit;
2085 
2086 hxge_alloc_tx_mem_pool_fail2:
2087 	/* Free control buffers */
2088 	j--;
2089 	for (; j >= 0; j--) {
2090 		hxge_free_tx_cntl_dma(hxgep,
2091 		    (p_hxge_dma_common_t)dma_cntl_p[j]);
2092 	}
2093 
2094 hxge_alloc_tx_mem_pool_fail1:
2095 	/* Free data buffers */
2096 	i--;
2097 	for (; i >= 0; i--) {
2098 		hxge_free_tx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
2099 		    num_chunks[i]);
2100 	}
2101 
2102 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2103 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2104 	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2105 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2106 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2107 
2108 hxge_alloc_tx_mem_pool_exit:
2109 	HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2110 	    "<== hxge_alloc_tx_mem_pool:status 0x%08x", status));
2111 
2112 	return (status);
2113 }
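
/*
 * Editor's note: a worked example of the per-channel sizing computed in
 * hxge_alloc_tx_mem_pool() above.  The descriptor and mailbox sizes here
 * are assumptions for illustration only; the real values come from
 * sizeof (tx_desc_t) and sizeof (txdma_mailbox_t).
 *
 *	ring entries    = 1024  (hypothetical hxge_tx_ring_size)
 *	bcopy threshold = 2048  (hypothetical hxge_bcopy_thresh)
 *	buffer pool     = 2048 * 1024   = 2 MB per channel
 *	control area    = 1024 * 8 + 64 = 8256 bytes per channel
 *	                  (assuming 8-byte descriptors, a 64-byte mailbox)
 */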
2114 
2115 static hxge_status_t
2116 hxge_alloc_tx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
2117     p_hxge_dma_common_t *dmap, size_t alloc_size,
2118     size_t block_size, uint32_t *num_chunks)
2119 {
2120 	p_hxge_dma_common_t	tx_dmap;
2121 	hxge_status_t		status = HXGE_OK;
2122 	size_t			total_alloc_size;
2123 	size_t			allocated = 0;
2124 	int			i, size_index, array_size;
2125 
2126 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_buf_dma"));
2127 
2128 	tx_dmap = (p_hxge_dma_common_t)
2129 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
2130 
2131 	total_alloc_size = alloc_size;
2132 	i = 0;
2133 	size_index = 0;
2134 	array_size = sizeof (alloc_sizes) / sizeof (size_t);
2135 	while ((size_index < array_size) &&
2136 	    (alloc_sizes[size_index] < alloc_size))
2137 		size_index++;
2138 	if (size_index >= array_size) {
2139 		size_index = array_size - 1;
2140 	}
2141 
2142 	while ((allocated < total_alloc_size) &&
2143 	    (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
2144 		tx_dmap[i].dma_chunk_index = i;
2145 		tx_dmap[i].block_size = block_size;
2146 		tx_dmap[i].alength = alloc_sizes[size_index];
2147 		tx_dmap[i].orig_alength = tx_dmap[i].alength;
2148 		tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2149 		tx_dmap[i].dma_channel = dma_channel;
2150 		tx_dmap[i].contig_alloc_type = B_FALSE;
2151 
2152 		status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2153 		    &hxge_tx_dma_attr, tx_dmap[i].alength,
2154 		    &hxge_dev_buf_dma_acc_attr,
2155 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
2156 		    (p_hxge_dma_common_t)(&tx_dmap[i]));
2157 		if (status != HXGE_OK) {
2158 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2159 			    " hxge_alloc_tx_buf_dma: Alloc Failed: "
2160 			    " for size: %d", alloc_sizes[size_index]));
2161 			size_index--;
2162 		} else {
2163 			i++;
2164 			allocated += alloc_sizes[size_index];
2165 		}
2166 	}
2167 
2168 	if (allocated < total_alloc_size) {
2169 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2170 		    " hxge_alloc_tx_buf_dma: failed due to"
2171 		    " allocated(%d) < required(%d)",
2172 		    allocated, total_alloc_size));
2173 		goto hxge_alloc_tx_mem_fail1;
2174 	}
2175 
2176 	*num_chunks = i;
2177 	*dmap = tx_dmap;
2178 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2179 	    "==> hxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
2180 	    *dmap, i));
2181 	goto hxge_alloc_tx_mem_exit;
2182 
2183 hxge_alloc_tx_mem_fail1:
2184 	KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2185 
2186 hxge_alloc_tx_mem_exit:
2187 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2188 	    "<== hxge_alloc_tx_buf_dma status 0x%08x", status));
2189 
2190 	return (status);
2191 }
2192 
2193 /*ARGSUSED*/
2194 static void
2195 hxge_free_tx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
2196     uint32_t num_chunks)
2197 {
2198 	int i;
2199 
2200 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_tx_buf_dma"));
2201 
2202 	for (i = 0; i < num_chunks; i++) {
2203 		hxge_dma_mem_free(dmap++);
2204 	}
2205 
2206 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_tx_buf_dma"));
2207 }
2208 
2209 /*ARGSUSED*/
2210 static hxge_status_t
2211 hxge_alloc_tx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
2212     p_hxge_dma_common_t *dmap, size_t size)
2213 {
2214 	p_hxge_dma_common_t	tx_dmap;
2215 	hxge_status_t		status = HXGE_OK;
2216 
2217 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_cntl_dma"));
2218 
2219 	tx_dmap = (p_hxge_dma_common_t)KMEM_ZALLOC(sizeof (hxge_dma_common_t),
2220 	    KM_SLEEP);
2221 
2222 	tx_dmap->contig_alloc_type = B_FALSE;
2223 
2224 	status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2225 	    &hxge_tx_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr,
2226 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, tx_dmap);
2227 	if (status != HXGE_OK) {
2228 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2229 		    " hxge_alloc_tx_cntl_dma: Alloc Failed: "
2230 		    " for size: %d", size));
2231 		goto hxge_alloc_tx_cntl_dma_fail1;
2232 	}
2233 
2234 	*dmap = tx_dmap;
2235 
2236 	goto hxge_alloc_tx_cntl_dma_exit;
2237 
2238 hxge_alloc_tx_cntl_dma_fail1:
2239 	KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t));
2240 
2241 hxge_alloc_tx_cntl_dma_exit:
2242 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2243 	    "<== hxge_alloc_tx_cntl_dma status 0x%08x", status));
2244 
2245 	return (status);
2246 }
2247 
2248 /*ARGSUSED*/
2249 static void
2250 hxge_free_tx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
2251 {
2252 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_tx_cntl_dma"));
2253 
2254 	hxge_dma_mem_free(dmap);
2255 
2256 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_tx_cntl_dma"));
2257 }
2258 
2259 static void
2260 hxge_free_tx_mem_pool(p_hxge_t hxgep)
2261 {
2262 	uint32_t		i, ndmas;
2263 	p_hxge_dma_pool_t	dma_poolp;
2264 	p_hxge_dma_common_t	*dma_buf_p;
2265 	p_hxge_dma_pool_t	dma_cntl_poolp;
2266 	p_hxge_dma_common_t	*dma_cntl_p;
2267 	uint32_t		*num_chunks;
2268 
2269 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_free_tx_mem_pool"));
2270 
2271 	dma_poolp = hxgep->tx_buf_pool_p;
2272 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
2273 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2274 		    "<== hxge_free_tx_mem_pool "
2275 		    "(null tx buf pool or buf not allocated)"));
2276 		return;
2277 	}
2278 
2279 	dma_cntl_poolp = hxgep->tx_cntl_pool_p;
2280 	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
2281 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2282 		    "<== hxge_free_tx_mem_pool "
2283 		    "(null tx cntl buf pool or cntl buf not allocated)"));
2284 		return;
2285 	}
2286 
2287 	dma_buf_p = dma_poolp->dma_buf_pool_p;
2288 	num_chunks = dma_poolp->num_chunks;
2289 
2290 	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
2291 	ndmas = dma_cntl_poolp->ndmas;
2292 
2293 	for (i = 0; i < ndmas; i++) {
2294 		hxge_free_tx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
2295 	}
2296 
2297 	for (i = 0; i < ndmas; i++) {
2298 		hxge_free_tx_cntl_dma(hxgep, dma_cntl_p[i]);
2299 	}
2300 
2301 	for (i = 0; i < ndmas; i++) {
2302 		KMEM_FREE(dma_buf_p[i],
2303 		    sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2304 		KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t));
2305 	}
2306 
2307 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2308 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2309 	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2310 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2311 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2312 
2313 	hxgep->tx_buf_pool_p = NULL;
2314 	hxgep->tx_cntl_pool_p = NULL;
2315 
2316 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_free_tx_mem_pool"));
2317 }
2318 
2319 /*ARGSUSED*/
2320 static hxge_status_t
2321 hxge_dma_mem_alloc(p_hxge_t hxgep, dma_method_t method,
2322     struct ddi_dma_attr *dma_attrp,
2323     size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
2324     p_hxge_dma_common_t dma_p)
2325 {
2326 	caddr_t		kaddrp;
2327 	int		ddi_status = DDI_SUCCESS;
2328 
2329 	dma_p->dma_handle = NULL;
2330 	dma_p->acc_handle = NULL;
2331 	dma_p->kaddrp = NULL;
2332 
2333 	ddi_status = ddi_dma_alloc_handle(hxgep->dip, dma_attrp,
2334 	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
2335 	if (ddi_status != DDI_SUCCESS) {
2336 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2337 		    "hxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
2338 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2339 	}
2340 
2341 	ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, acc_attr_p,
2342 	    xfer_flags, DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
2343 	    &dma_p->acc_handle);
2344 	if (ddi_status != DDI_SUCCESS) {
2345 		/* The caller will decide whether it is fatal */
2346 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2347 		    "hxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
2348 		ddi_dma_free_handle(&dma_p->dma_handle);
2349 		dma_p->dma_handle = NULL;
2350 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2351 	}
2352 
2353 	if (dma_p->alength < length) {
2354 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2355 		    "hxge_dma_mem_alloc:ddi_dma_mem_alloc < length."));
2356 		ddi_dma_mem_free(&dma_p->acc_handle);
2357 		ddi_dma_free_handle(&dma_p->dma_handle);
2358 		dma_p->acc_handle = NULL;
2359 		dma_p->dma_handle = NULL;
2360 		return (HXGE_ERROR);
2361 	}
2362 
2363 	ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2364 	    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2365 	    &dma_p->dma_cookie, &dma_p->ncookies);
2366 	if (ddi_status != DDI_DMA_MAPPED) {
2367 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2368 		    "hxge_dma_mem_alloc:ddi_dma_addr_bind failed "
2369 		    "(status 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2370 		if (dma_p->acc_handle) {
2371 			ddi_dma_mem_free(&dma_p->acc_handle);
2372 			dma_p->acc_handle = NULL;
2373 		}
2374 		ddi_dma_free_handle(&dma_p->dma_handle);
2375 		dma_p->dma_handle = NULL;
2376 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2377 	}
2378 
2379 	if (dma_p->ncookies != 1) {
2380 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2381 		    "hxge_dma_mem_alloc:ddi_dma_addr_bind > 1 cookie "
2382 		    "(status 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2383 		if (dma_p->acc_handle) {
2384 			ddi_dma_mem_free(&dma_p->acc_handle);
2385 			dma_p->acc_handle = NULL;
2386 		}
2387 		(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2388 		ddi_dma_free_handle(&dma_p->dma_handle);
2389 		dma_p->dma_handle = NULL;
2390 		return (HXGE_ERROR);
2391 	}
2392 
2393 	dma_p->kaddrp = kaddrp;
2394 #if defined(__i386)
2395 	dma_p->ioaddr_pp =
2396 	    (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
2397 #else
2398 	dma_p->ioaddr_pp = (unsigned char *) dma_p->dma_cookie.dmac_laddress;
2399 #endif
2400 
2401 	HPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
2402 
2403 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_dma_mem_alloc: "
2404 	    "dma buffer allocated: dma_p $%p "
2405 	    "return dmac_laddress from cookie $%p dmac_size %d "
2406 	    "dma_p->ioaddr_p $%p "
2407 	    "dma_p->orig_ioaddr_p $%p "
2408 	    "orig_vatopa $%p "
2409 	    "alength %d (0x%x) "
2410 	    "kaddrp $%p "
2411 	    "length %d (0x%x)",
2412 	    dma_p,
2413 	    dma_p->dma_cookie.dmac_laddress,
2414 	    dma_p->dma_cookie.dmac_size,
2415 	    dma_p->ioaddr_pp,
2416 	    dma_p->orig_ioaddr_pp,
2417 	    dma_p->orig_vatopa,
2418 	    dma_p->alength, dma_p->alength,
2419 	    kaddrp,
2420 	    length, length));
2421 
2422 	return (HXGE_OK);
2423 }
2424 
2425 static void
2426 hxge_dma_mem_free(p_hxge_dma_common_t dma_p)
2427 {
2428 	if (dma_p == NULL)
2429 		return;
2430 
2431 	if (dma_p->dma_handle != NULL) {
2432 		if (dma_p->ncookies) {
2433 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2434 			dma_p->ncookies = 0;
2435 		}
2436 		ddi_dma_free_handle(&dma_p->dma_handle);
2437 		dma_p->dma_handle = NULL;
2438 	}
2439 
2440 	if (dma_p->acc_handle != NULL) {
2441 		ddi_dma_mem_free(&dma_p->acc_handle);
2442 		dma_p->acc_handle = NULL;
2443 		HPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
2444 	}
2445 
2446 	dma_p->kaddrp = NULL;
2447 	dma_p->alength = 0;
2448 }
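
/*
 * Editor's sketch: hxge_dma_mem_alloc()/hxge_dma_mem_free() above follow
 * the standard DDI DMA lifecycle, and every error path unwinds in strict
 * reverse order of setup.  A minimal sketch of that lifecycle, with the
 * driver-specific bookkeeping removed (illustrative only):
 */
#if 0	/* illustrative only, not compiled */
static void
sketch_dma_lifecycle(dev_info_t *dip, ddi_dma_attr_t *attrp,
    ddi_device_acc_attr_t *accp, size_t len)
{
	ddi_dma_handle_t	h;
	ddi_acc_handle_t	ah;
	ddi_dma_cookie_t	cookie;
	uint_t			nc;
	caddr_t			kaddr;
	size_t			rlen;

	/* Setup: handle, then memory, then binding -- as above. */
	if (ddi_dma_alloc_handle(dip, attrp, DDI_DMA_DONTWAIT, NULL,
	    &h) != DDI_SUCCESS)
		return;
	if (ddi_dma_mem_alloc(h, len, accp, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &kaddr, &rlen, &ah) != DDI_SUCCESS) {
		ddi_dma_free_handle(&h);
		return;
	}
	if (ddi_dma_addr_bind_handle(h, NULL, kaddr, rlen,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
	    &cookie, &nc) == DDI_DMA_MAPPED) {
		/* The device is programmed with cookie.dmac_laddress. */
		(void) ddi_dma_unbind_handle(h);
	}
	/* Teardown strictly reverses the setup order. */
	ddi_dma_mem_free(&ah);
	ddi_dma_free_handle(&h);
}
#endif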
2449 
2450 /*
2451  *	hxge_m_start() -- start transmitting and receiving.
2452  *
2453  *	This function is called by the MAC layer when the first
2454  *	stream is opened, to prepare the hardware for sending
2455  *	and receiving packets.
2456  */
2457 static int
2458 hxge_m_start(void *arg)
2459 {
2460 	p_hxge_t hxgep = (p_hxge_t)arg;
2461 
2462 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_start"));
2463 
2464 	MUTEX_ENTER(hxgep->genlock);
2465 
2466 	if (hxge_init(hxgep) != DDI_SUCCESS) {
2467 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2468 		    "<== hxge_m_start: initialization failed"));
2469 		MUTEX_EXIT(hxgep->genlock);
2470 		return (EIO);
2471 	}
2472 
2473 	if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) {
2474 		/*
2475 		 * Start timer to check the system error and tx hangs
2476 		 */
2477 		hxgep->hxge_timerid = hxge_start_timer(hxgep,
2478 		    hxge_check_hw_state, HXGE_CHECK_TIMER);
2479 
2480 		hxgep->hxge_mac_state = HXGE_MAC_STARTED;
2481 
2482 		hxgep->timeout.link_status = 0;
2483 		hxgep->timeout.report_link_status = B_TRUE;
2484 		hxgep->timeout.ticks = drv_usectohz(2 * 1000000);
2485 
2486 		/* Start the link status timer to check the link status */
2487 		MUTEX_ENTER(&hxgep->timeout.lock);
2488 		hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
2489 		    hxgep->timeout.ticks);
2490 		MUTEX_EXIT(&hxgep->timeout.lock);
2491 	}
2492 
2493 	MUTEX_EXIT(hxgep->genlock);
2494 
2495 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_start"));
2496 
2497 	return (0);
2498 }
2499 
2500 /*
2501  * hxge_m_stop(): stop transmitting and receiving.
2502  */
2503 static void
2504 hxge_m_stop(void *arg)
2505 {
2506 	p_hxge_t hxgep = (p_hxge_t)arg;
2507 
2508 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_stop"));
2509 
2510 	if (hxgep->hxge_timerid) {
2511 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
2512 		hxgep->hxge_timerid = 0;
2513 	}
2514 
2515 	/* Stop the link status timer before unregistering */
2516 	MUTEX_ENTER(&hxgep->timeout.lock);
2517 	if (hxgep->timeout.id) {
2518 		(void) untimeout(hxgep->timeout.id);
2519 		hxgep->timeout.id = 0;
2520 	}
2521 	hxge_link_update(hxgep, LINK_STATE_DOWN);
2522 	MUTEX_EXIT(&hxgep->timeout.lock);
2523 
2524 	MUTEX_ENTER(hxgep->genlock);
2525 
2526 	hxge_uninit(hxgep);
2527 
2528 	hxgep->hxge_mac_state = HXGE_MAC_STOPPED;
2529 
2530 	MUTEX_EXIT(hxgep->genlock);
2531 
2532 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_stop"));
2533 }
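
/*
 * Editor's sketch: hxge_m_start()/hxge_m_stop() pair every timeout()
 * with a matching untimeout() under timeout.lock so a stop cannot race
 * a concurrent rearm.  The pattern in isolation (my_state_t and
 * my_poll() are hypothetical names, not part of this driver):
 */
#if 0	/* illustrative only, not compiled */
typedef struct my_state {
	kmutex_t	lock;
	timeout_id_t	id;
} my_state_t;

static void my_poll(void *arg);		/* hypothetical callback */

static void
sketch_timer_start(my_state_t *sp, clock_t ticks)
{
	mutex_enter(&sp->lock);
	sp->id = timeout(my_poll, sp, ticks);
	mutex_exit(&sp->lock);
}

static void
sketch_timer_stop(my_state_t *sp)
{
	mutex_enter(&sp->lock);
	if (sp->id != 0) {
		(void) untimeout(sp->id);
		sp->id = 0;
	}
	mutex_exit(&sp->lock);
}
#endif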
2534 
2535 static int
2536 hxge_m_unicst(void *arg, const uint8_t *macaddr)
2537 {
2538 	p_hxge_t		hxgep = (p_hxge_t)arg;
2539 	struct ether_addr	addrp;
2540 	hxge_status_t		status;
2541 
2542 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_unicst"));
2543 
2544 	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
2545 
2546 	status = hxge_set_mac_addr(hxgep, &addrp);
2547 	if (status != HXGE_OK) {
2548 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2549 		    "<== hxge_m_unicst: set unicast failed"));
2550 		return (EINVAL);
2551 	}
2552 
2553 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_unicst"));
2554 
2555 	return (0);
2556 }
2557 
2558 static int
2559 hxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
2560 {
2561 	p_hxge_t		hxgep = (p_hxge_t)arg;
2562 	struct ether_addr	addrp;
2563 
2564 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_multicst: add %d", add));
2565 
2566 	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
2567 
2568 	if (add) {
2569 		if (hxge_add_mcast_addr(hxgep, &addrp)) {
2570 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2571 			    "<== hxge_m_multicst: add multicast failed"));
2572 			return (EINVAL);
2573 		}
2574 	} else {
2575 		if (hxge_del_mcast_addr(hxgep, &addrp)) {
2576 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2577 			    "<== hxge_m_multicst: del multicast failed"));
2578 			return (EINVAL);
2579 		}
2580 	}
2581 
2582 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_multicst"));
2583 
2584 	return (0);
2585 }
2586 
2587 static int
2588 hxge_m_promisc(void *arg, boolean_t on)
2589 {
2590 	p_hxge_t hxgep = (p_hxge_t)arg;
2591 
2592 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_promisc: on %d", on));
2593 
2594 	if (hxge_set_promisc(hxgep, on)) {
2595 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2596 		    "<== hxge_m_promisc: set promisc failed"));
2597 		return (EINVAL);
2598 	}
2599 
2600 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_promisc: on %d", on));
2601 
2602 	return (0);
2603 }
2604 
2605 static void
2606 hxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2607 {
2608 	p_hxge_t	hxgep = (p_hxge_t)arg;
2609 	struct iocblk	*iocp = (struct iocblk *)mp->b_rptr;
2610 	boolean_t	need_privilege;
2611 	int		err;
2612 	int		cmd;
2613 
2614 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl"));
2615 
2617 	iocp->ioc_error = 0;
2618 	need_privilege = B_TRUE;
2619 	cmd = iocp->ioc_cmd;
2620 
2621 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl: cmd 0x%08x", cmd));
2622 	switch (cmd) {
2623 	default:
2624 		miocnak(wq, mp, 0, EINVAL);
2625 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl: invalid"));
2626 		return;
2627 
2628 	case LB_GET_INFO_SIZE:
2629 	case LB_GET_INFO:
2630 	case LB_GET_MODE:
2631 		need_privilege = B_FALSE;
2632 		break;
2633 
2634 	case LB_SET_MODE:
2635 		break;
2636 
2637 	case ND_GET:
2638 		need_privilege = B_FALSE;
2639 		break;
2640 	case ND_SET:
2641 		break;
2642 
2643 	case HXGE_GET64:
2644 	case HXGE_PUT64:
2645 	case HXGE_GET_TX_RING_SZ:
2646 	case HXGE_GET_TX_DESC:
2647 	case HXGE_TX_SIDE_RESET:
2648 	case HXGE_RX_SIDE_RESET:
2649 	case HXGE_GLOBAL_RESET:
2650 	case HXGE_RESET_MAC:
2651 	case HXGE_PUT_TCAM:
2652 	case HXGE_GET_TCAM:
2653 	case HXGE_RTRACE:
2654 
2655 		need_privilege = B_FALSE;
2656 		break;
2657 	}
2658 
2659 	if (need_privilege) {
2660 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
2661 		if (err != 0) {
2662 			miocnak(wq, mp, 0, err);
2663 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2664 			    "<== hxge_m_ioctl: no priv"));
2665 			return;
2666 		}
2667 	}
2668 
2669 	switch (cmd) {
2670 	case ND_GET:
2671 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_GET command")); /* FALLTHROUGH */
2672 	case ND_SET:
2673 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_SET command"));
2674 		hxge_param_ioctl(hxgep, wq, mp, iocp);
2675 		break;
2676 
2677 	case LB_GET_MODE:
2678 	case LB_SET_MODE:
2679 	case LB_GET_INFO_SIZE:
2680 	case LB_GET_INFO:
2681 		hxge_loopback_ioctl(hxgep, wq, mp, iocp);
2682 		break;
2683 
2684 	case HXGE_PUT_TCAM:
2685 	case HXGE_GET_TCAM:
2686 	case HXGE_GET64:
2687 	case HXGE_PUT64:
2688 	case HXGE_GET_TX_RING_SZ:
2689 	case HXGE_GET_TX_DESC:
2690 	case HXGE_TX_SIDE_RESET:
2691 	case HXGE_RX_SIDE_RESET:
2692 	case HXGE_GLOBAL_RESET:
2693 	case HXGE_RESET_MAC:
2694 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL,
2695 		    "==> hxge_m_ioctl: cmd 0x%x", cmd));
2696 		hxge_hw_ioctl(hxgep, wq, mp, iocp);
2697 		break;
2698 	}
2699 
2700 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl"));
2701 }
2702 
2703 /*ARGSUSED*/
2704 boolean_t
2705 hxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2706 {
2707 	uint32_t		*txflags = cap_data;
2708 
2709 	switch (cap) {
2710 	case MAC_CAPAB_HCKSUM:
2711 		*txflags = HCKSUM_INET_PARTIAL;
2712 		break;
2713 
2714 	default:
2715 		return (B_FALSE);
2716 	}
2717 	return (B_TRUE);
2718 }
2719 
2720 static boolean_t
2721 hxge_param_locked(mac_prop_id_t pr_num)
2722 {
2723 	/*
2724 	 * All adv_* parameters are locked (read-only) while
2725 	 * the device is in any sort of loopback mode ...
2726 	 */
2727 	switch (pr_num) {
2728 		case MAC_PROP_ADV_1000FDX_CAP:
2729 		case MAC_PROP_EN_1000FDX_CAP:
2730 		case MAC_PROP_ADV_1000HDX_CAP:
2731 		case MAC_PROP_EN_1000HDX_CAP:
2732 		case MAC_PROP_ADV_100FDX_CAP:
2733 		case MAC_PROP_EN_100FDX_CAP:
2734 		case MAC_PROP_ADV_100HDX_CAP:
2735 		case MAC_PROP_EN_100HDX_CAP:
2736 		case MAC_PROP_ADV_10FDX_CAP:
2737 		case MAC_PROP_EN_10FDX_CAP:
2738 		case MAC_PROP_ADV_10HDX_CAP:
2739 		case MAC_PROP_EN_10HDX_CAP:
2740 		case MAC_PROP_AUTONEG:
2741 		case MAC_PROP_FLOWCTRL:
2742 			return (B_TRUE);
2743 	}
2744 	return (B_FALSE);
2745 }
2746 
2747 /*
2748  * callback functions for set/get of properties
2749  */
2750 static int
2751 hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
2752     uint_t pr_valsize, const void *pr_val)
2753 {
2754 	hxge_t		*hxgep = barg;
2755 	p_hxge_stats_t	statsp;
2756 	int		err = 0;
2757 	uint32_t	new_mtu, old_framesize, new_framesize;
2758 
2759 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "==> hxge_m_setprop"));
2760 
2761 	statsp = hxgep->statsp;
2762 	mutex_enter(hxgep->genlock);
2763 	if (statsp->port_stats.lb_mode != hxge_lb_normal &&
2764 	    hxge_param_locked(pr_num)) {
2765 		/*
2766 		 * All adv_* parameters are locked (read-only)
2767 		 * while the device is in any sort of loopback mode.
2768 		 */
2769 		HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
2770 		    "==> hxge_m_setprop: loopback mode: read only"));
2771 		mutex_exit(hxgep->genlock);
2772 		return (EBUSY);
2773 	}
2774 
2775 	switch (pr_num) {
2776 		/*
2777 		 * These properties either do not exist or are read-only.
2778 		 */
2779 		case MAC_PROP_EN_1000FDX_CAP:
2780 		case MAC_PROP_EN_100FDX_CAP:
2781 		case MAC_PROP_EN_10FDX_CAP:
2782 		case MAC_PROP_EN_1000HDX_CAP:
2783 		case MAC_PROP_EN_100HDX_CAP:
2784 		case MAC_PROP_EN_10HDX_CAP:
2785 		case MAC_PROP_ADV_1000FDX_CAP:
2786 		case MAC_PROP_ADV_1000HDX_CAP:
2787 		case MAC_PROP_ADV_100FDX_CAP:
2788 		case MAC_PROP_ADV_100HDX_CAP:
2789 		case MAC_PROP_ADV_10FDX_CAP:
2790 		case MAC_PROP_ADV_10HDX_CAP:
2791 		case MAC_PROP_STATUS:
2792 		case MAC_PROP_SPEED:
2793 		case MAC_PROP_DUPLEX:
2794 		case MAC_PROP_AUTONEG:
2795 		/*
2796 		 * Flow control is handled in the shared domain and
2797 		 * it is read-only here.
2798 		 */
2799 		case MAC_PROP_FLOWCTRL:
2800 			err = EINVAL;
2801 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
2802 			    "==> hxge_m_setprop:  read only property %d",
2803 			    pr_num));
2804 			break;
2805 
2806 		case MAC_PROP_MTU:
2807 			bcopy(pr_val, &new_mtu, sizeof (new_mtu));
2808 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
2809 			    "==> hxge_m_setprop: set MTU: %d", new_mtu));
2810 
2811 			new_framesize = new_mtu + MTU_TO_FRAME_SIZE;
2812 			if (new_framesize == hxgep->vmac.maxframesize) {
2813 				err = 0;
2814 				break;
2815 			}
2816 
2817 			if (hxgep->hxge_mac_state == HXGE_MAC_STARTED) {
2818 				err = EBUSY;
2819 				break;
2820 			}
2821 
2822 			if (new_framesize < MIN_FRAME_SIZE ||
2823 			    new_framesize > MAX_FRAME_SIZE) {
2824 				err = EINVAL;
2825 				break;
2826 			}
2827 
2828 			old_framesize = hxgep->vmac.maxframesize;
2829 			hxgep->vmac.maxframesize = (uint16_t)new_framesize;
2830 
2831 			if (hxge_vmac_set_framesize(hxgep)) {
2832 				hxgep->vmac.maxframesize =
2833 				    (uint16_t)old_framesize;
2834 				err = EINVAL;
2835 				break;
2836 			}
2837 
2838 			err = mac_maxsdu_update(hxgep->mach, new_mtu);
2839 			if (err) {
2840 				hxgep->vmac.maxframesize =
2841 				    (uint16_t)old_framesize;
2842 				(void) hxge_vmac_set_framesize(hxgep);
2843 			}
2844 
2845 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
2846 			    "==> hxge_m_setprop: set MTU: %d maxframe %d",
2847 			    new_mtu, hxgep->vmac.maxframesize));
2848 			break;
2849 
2850 		case MAC_PROP_PRIVATE:
2851 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
2852 			    "==> hxge_m_setprop: private property"));
2853 			err = hxge_set_priv_prop(hxgep, pr_name, pr_valsize,
2854 			    pr_val);
2855 			break;
2856 
2857 		default:
2858 			err = ENOTSUP;
2859 			break;
2860 	}
2861 
2862 	mutex_exit(hxgep->genlock);
2863 
2864 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
2865 	    "<== hxge_m_setprop (return %d)", err));
2866 
2867 	return (err);
2868 }
2869 
2870 /* ARGSUSED */
2871 static int
2872 hxge_get_def_val(hxge_t *hxgep, mac_prop_id_t pr_num, uint_t pr_valsize,
2873     void *pr_val)
2874 {
2875 	int		err = 0;
2876 	link_flowctrl_t	fl;
2877 
2878 	switch (pr_num) {
2879 	case MAC_PROP_DUPLEX:
2880 		*(uint8_t *)pr_val = 2;
2881 		break;
2882 	case MAC_PROP_AUTONEG:
2883 		*(uint8_t *)pr_val = 0;
2884 		break;
2885 	case MAC_PROP_FLOWCTRL:
2886 		if (pr_valsize < sizeof (link_flowctrl_t))
2887 			return (EINVAL);
2888 		fl = LINK_FLOWCTRL_TX;
2889 		bcopy(&fl, pr_val, sizeof (fl));
2890 		break;
2891 	default:
2892 		err = ENOTSUP;
2893 		break;
2894 	}
2895 	return (err);
2896 }
2897 
2898 static int
2899 hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
2900     uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
2901 {
2902 	hxge_t 		*hxgep = barg;
2903 	p_hxge_stats_t	statsp = hxgep->statsp;
2904 	int		err = 0;
2905 	link_flowctrl_t fl;
2906 	uint64_t	tmp = 0;
2907 	link_state_t	ls;
2908 
2909 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
2910 	    "==> hxge_m_getprop: pr_num %d", pr_num));
2911 
2912 	if (pr_valsize == 0)
2913 		return (EINVAL);
2914 
2915 	*perm = MAC_PROP_PERM_RW;
2916 
2917 	if ((pr_flags & MAC_PROP_DEFAULT) && (pr_num != MAC_PROP_PRIVATE)) {
2918 		err = hxge_get_def_val(hxgep, pr_num, pr_valsize, pr_val);
2919 		return (err);
2920 	}
2921 
2922 	bzero(pr_val, pr_valsize);
2923 	switch (pr_num) {
2924 		case MAC_PROP_DUPLEX:
2925 			*perm = MAC_PROP_PERM_READ;
2926 			*(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
2927 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
2928 			    "==> hxge_m_getprop: duplex mode %d",
2929 			    *(uint8_t *)pr_val));
2930 			break;
2931 
2932 		case MAC_PROP_SPEED:
2933 			*perm = MAC_PROP_PERM_READ;
2934 			if (pr_valsize < sizeof (uint64_t))
2935 				return (EINVAL);
2936 			tmp = statsp->mac_stats.link_speed * 1000000ull;
2937 			bcopy(&tmp, pr_val, sizeof (tmp));
2938 			break;
2939 
2940 		case MAC_PROP_STATUS:
2941 			*perm = MAC_PROP_PERM_READ;
2942 			if (pr_valsize < sizeof (link_state_t))
2943 				return (EINVAL);
2944 			if (!statsp->mac_stats.link_up)
2945 				ls = LINK_STATE_DOWN;
2946 			else
2947 				ls = LINK_STATE_UP;
2948 			bcopy(&ls, pr_val, sizeof (ls));
2949 			break;
2950 
2951 		case MAC_PROP_FLOWCTRL:
2952 			/*
2953 			 * Flow control is supported by the shared domain and
2954 			 * it is currently transmit-only.
2955 			 */
2956 			*perm = MAC_PROP_PERM_READ;
2957 			if (pr_valsize < sizeof (link_flowctrl_t))
2958 				return (EINVAL);
2959 			fl = LINK_FLOWCTRL_TX;
2960 			bcopy(&fl, pr_val, sizeof (fl));
2961 			break;
2962 		case MAC_PROP_AUTONEG:
2963 			/* 10Gb link only; it is not negotiable. */
2964 			*perm = MAC_PROP_PERM_READ;
2965 			*(uint8_t *)pr_val = 0;
2966 			break;
2967 		case MAC_PROP_ADV_1000FDX_CAP:
2968 		case MAC_PROP_ADV_100FDX_CAP:
2969 		case MAC_PROP_ADV_10FDX_CAP:
2970 		case MAC_PROP_ADV_1000HDX_CAP:
2971 		case MAC_PROP_ADV_100HDX_CAP:
2972 		case MAC_PROP_ADV_10HDX_CAP:
2973 		case MAC_PROP_EN_1000FDX_CAP:
2974 		case MAC_PROP_EN_100FDX_CAP:
2975 		case MAC_PROP_EN_10FDX_CAP:
2976 		case MAC_PROP_EN_1000HDX_CAP:
2977 		case MAC_PROP_EN_100HDX_CAP:
2978 		case MAC_PROP_EN_10HDX_CAP:
2979 			err = ENOTSUP;
2980 			break;
2981 
2982 		case MAC_PROP_PRIVATE:
2983 			err = hxge_get_priv_prop(hxgep, pr_name, pr_flags,
2984 			    pr_valsize, pr_val);
2985 			break;
2986 		default:
2987 			err = EINVAL;
2988 			break;
2989 	}
2990 
2991 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "<== hxge_m_getprop"));
2992 
2993 	return (err);
2994 }
2995 
2996 /* ARGSUSED */
2997 static int
2998 hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_valsize,
2999     const void *pr_val)
3000 {
3001 	p_hxge_param_t	param_arr = hxgep->param_arr;
3002 	int		err = 0;
3003 
3004 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3005 	    "==> hxge_set_priv_prop: name %s (value %s)", pr_name, pr_val));
3006 
3007 	if (pr_val == NULL) {
3008 		return (EINVAL);
3009 	}
3010 
3011 	/* Blanking */
3012 	if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3013 		err = hxge_param_rx_intr_time(hxgep, NULL, NULL,
3014 		    (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_time]);
3015 	} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3016 		err = hxge_param_rx_intr_pkts(hxgep, NULL, NULL,
3017 		    (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_pkts]);
3018 
3019 	/* Classification */
3020 	} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3021 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3022 		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3023 	} else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3024 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3025 		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3026 	} else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3027 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3028 		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3029 	} else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3030 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3031 		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3032 	} else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3033 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3034 		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3035 	} else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3036 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3037 		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3038 	} else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3039 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3040 		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3041 	} else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3042 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3043 		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3044 	} else {
3045 		err = EINVAL;
3046 	}
3047 
3048 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3049 	    "<== hxge_set_priv_prop: err %d", err));
3050 
3051 	return (err);
3052 }
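
/*
 * Editor's note: the private properties handled above are reachable from
 * userland through dladm(1M).  Illustrative invocations, assuming a link
 * named hxge0 (the link name and the values are assumptions; the
 * property names come from the code above):
 *
 *	# dladm set-linkprop -p _rxdma_intr_time=30 hxge0
 *	# dladm set-linkprop -p _class_opt_ipv4_tcp=0x10 hxge0
 *	# dladm show-linkprop -p _rxdma_intr_pkts hxge0
 */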
3053 
3054 static int
3055 hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_flags,
3056     uint_t pr_valsize, void *pr_val)
3057 {
3058 	p_hxge_param_t	param_arr = hxgep->param_arr;
3059 	char		valstr[MAXNAMELEN];
3060 	int		err = 0;
3061 	uint_t		strsize;
3062 	int		value = 0;
3063 
3064 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3065 	    "==> hxge_get_priv_prop: property %s", pr_name));
3066 
3067 	if (pr_flags & MAC_PROP_DEFAULT) {
3068 		/* Receive Interrupt Blanking Parameters */
3069 		if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3070 			value = RXDMA_RCR_TO_DEFAULT;
3071 		} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3072 			value = RXDMA_RCR_PTHRES_DEFAULT;
3073 
3074 		/* Classification and Load Distribution Configuration */
3075 		} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0 ||
3076 		    strcmp(pr_name, "_class_opt_ipv4_udp") == 0 ||
3077 		    strcmp(pr_name, "_class_opt_ipv4_ah") == 0 ||
3078 		    strcmp(pr_name, "_class_opt_ipv4_sctp") == 0 ||
3079 		    strcmp(pr_name, "_class_opt_ipv6_tcp") == 0 ||
3080 		    strcmp(pr_name, "_class_opt_ipv6_udp") == 0 ||
3081 		    strcmp(pr_name, "_class_opt_ipv6_ah") == 0 ||
3082 		    strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3083 			value = HXGE_CLASS_TCAM_LOOKUP;
3084 		} else {
3085 			err = EINVAL;
3086 		}
3087 	} else {
3088 		/* Receive Interrupt Blanking Parameters */
3089 		if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3090 			value = hxgep->intr_timeout;
3091 		} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3092 			value = hxgep->intr_threshold;
3093 
3094 		/* Classification and Load Distribution Configuration */
3095 		} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3096 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3097 			    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3098 
3099 			value = (int)param_arr[param_class_opt_ipv4_tcp].value;
3100 		} else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3101 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3102 			    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3103 
3104 			value = (int)param_arr[param_class_opt_ipv4_udp].value;
3105 		} else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3106 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3107 			    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3108 
3109 			value = (int)param_arr[param_class_opt_ipv4_ah].value;
3110 		} else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3111 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3112 			    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3113 
3114 			value = (int)param_arr[param_class_opt_ipv4_sctp].value;
3115 		} else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3116 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3117 			    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3118 
3119 			value = (int)param_arr[param_class_opt_ipv6_tcp].value;
3120 		} else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3121 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3122 			    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3123 
3124 			value = (int)param_arr[param_class_opt_ipv6_udp].value;
3125 		} else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3126 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3127 			    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3128 
3129 			value = (int)param_arr[param_class_opt_ipv6_ah].value;
3130 		} else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3131 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3132 			    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3133 
3134 			value = (int)param_arr[param_class_opt_ipv6_sctp].value;
3135 		} else {
3136 			err = EINVAL;
3137 		}
3138 	}
3139 
3140 	if (err == 0) {
3141 		(void) snprintf(valstr, sizeof (valstr), "0x%x", value);
3142 
3143 		strsize = (uint_t)strlen(valstr);
3144 		if (pr_valsize < strsize) {
3145 			err = ENOBUFS;
3146 		} else {
3147 			(void) strlcpy(pr_val, valstr, pr_valsize);
3148 		}
3149 	}
3150 
3151 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3152 	    "<== hxge_get_priv_prop: return %d", err));
3153 
3154 	return (err);
3155 }

3156 /*
3157  * Module loading and removing entry points.
3158  */
3159 DDI_DEFINE_STREAM_OPS(hxge_dev_ops, nulldev, nulldev, hxge_attach, hxge_detach,
3160     nodev, NULL, D_MP, NULL, NULL);
3161 
3162 extern struct mod_ops mod_driverops;
3163 
3164 #define	HXGE_DESC_VER	"HXGE 10Gb Ethernet Driver"
3165 
3166 /*
3167  * Module linkage information for the kernel.
3168  */
3169 static struct modldrv hxge_modldrv = {
3170 	&mod_driverops,
3171 	HXGE_DESC_VER,
3172 	&hxge_dev_ops
3173 };
3174 
3175 static struct modlinkage modlinkage = {
3176 	MODREV_1, (void *) &hxge_modldrv, NULL
3177 };
3178 
3179 int
3180 _init(void)
3181 {
3182 	int status;
3183 
3184 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
3185 	mac_init_ops(&hxge_dev_ops, "hxge");
3186 	status = ddi_soft_state_init(&hxge_list, sizeof (hxge_t), 0);
3187 	if (status != 0) {
3188 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
3189 		    "failed to init device soft state"));
3190 		mac_fini_ops(&hxge_dev_ops);
3191 		goto _init_exit;
3192 	}
3193 
3194 	status = mod_install(&modlinkage);
3195 	if (status != 0) {
3196 		ddi_soft_state_fini(&hxge_list);
3197 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "Mod install failed"));
3198 		goto _init_exit;
3199 	}
3200 
3201 	MUTEX_INIT(&hxge_common_lock, NULL, MUTEX_DRIVER, NULL);
3202 
3203 _init_exit:
3204 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
3205 
3206 	return (status);
3207 }
3208 
3209 int
3210 _fini(void)
3211 {
3212 	int status;
3213 
3214 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
3215 
3216 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
3217 
3218 	if (hxge_mblks_pending)
3219 		return (EBUSY);
3220 
3221 	status = mod_remove(&modlinkage);
3222 	if (status != DDI_SUCCESS) {
3223 		HXGE_DEBUG_MSG((NULL, MOD_CTL,
3224 		    "Module removal failed 0x%08x", status));
3225 		goto _fini_exit;
3226 	}
3227 
3228 	mac_fini_ops(&hxge_dev_ops);
3229 
3230 	ddi_soft_state_fini(&hxge_list);
3231 
3232 	MUTEX_DESTROY(&hxge_common_lock);
3233 
3234 _fini_exit:
3235 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));
3236 
3237 	return (status);
3238 }
3239 
3240 int
3241 _info(struct modinfo *modinfop)
3242 {
3243 	int status;
3244 
3245 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
3246 	status = mod_info(&modlinkage, modinfop);
3247 	HXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
3248 
3249 	return (status);
3250 }
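
/*
 * Editor's note: the three entry points above are exercised by the
 * standard module administration commands (paths and ids illustrative):
 *
 *	# modload /kernel/drv/hxge	-> _init()
 *	# modinfo | grep hxge		-> _info()
 *	# modunload -i <id>		-> _fini()
 *
 * _fini() refuses to unload (EBUSY) while any mblks are still pending.
 */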
3251 
3252 /*ARGSUSED*/
3253 hxge_status_t
3254 hxge_add_intrs(p_hxge_t hxgep)
3255 {
3256 	int		intr_types;
3257 	int		type = 0;
3258 	int		ddi_status = DDI_SUCCESS;
3259 	hxge_status_t	status = HXGE_OK;
3260 
3261 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs"));
3262 
3263 	hxgep->hxge_intr_type.intr_registered = B_FALSE;
3264 	hxgep->hxge_intr_type.intr_enabled = B_FALSE;
3265 	hxgep->hxge_intr_type.msi_intx_cnt = 0;
3266 	hxgep->hxge_intr_type.intr_added = 0;
3267 	hxgep->hxge_intr_type.niu_msi_enable = B_FALSE;
3268 	hxgep->hxge_intr_type.intr_type = 0;
3269 
3270 	if (hxge_msi_enable) {
3271 		hxgep->hxge_intr_type.niu_msi_enable = B_TRUE;
3272 	}
3273 
3274 	/* Get the supported interrupt types */
3275 	if ((ddi_status = ddi_intr_get_supported_types(hxgep->dip, &intr_types))
3276 	    != DDI_SUCCESS) {
3277 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_intrs: "
3278 		    "ddi_intr_get_supported_types failed: status 0x%08x",
3279 		    ddi_status));
3280 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3281 	}
3282 
3283 	hxgep->hxge_intr_type.intr_types = intr_types;
3284 
3285 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3286 	    "ddi_intr_get_supported_types: 0x%08x", intr_types));
3287 
3288 	/*
3289 	 * Pick the interrupt type (MSI-X, MSI, or fixed INTx) based on
3290 	 * hxge_msi_enable:	1      - MSI
3291 	 *			2      - MSI-X
3292 	 *			others - FIXED
3293 	 */
3294 	switch (hxge_msi_enable) {
3295 	default:
3296 		type = DDI_INTR_TYPE_FIXED;
3297 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3298 		    "use fixed (intx emulation) type %08x", type));
3299 		break;
3300 
3301 	case 2:
3302 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3303 		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
3304 		if (intr_types & DDI_INTR_TYPE_MSIX) {
3305 			type = DDI_INTR_TYPE_MSIX;
3306 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3307 			    "==> hxge_add_intrs: "
3308 			    "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3309 		} else if (intr_types & DDI_INTR_TYPE_MSI) {
3310 			type = DDI_INTR_TYPE_MSI;
3311 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3312 			    "==> hxge_add_intrs: "
3313 			    "ddi_intr_get_supported_types: MSI 0x%08x", type));
3314 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
3315 			type = DDI_INTR_TYPE_FIXED;
3316 			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3317 			    "ddi_intr_get_supported_types: FIXED 0x%08x", type));
3318 		}
3319 		break;
3320 
3321 	case 1:
3322 		if (intr_types & DDI_INTR_TYPE_MSI) {
3323 			type = DDI_INTR_TYPE_MSI;
3324 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3325 			    "==> hxge_add_intrs: "
3326 			    "ddi_intr_get_supported_types: MSI 0x%08x", type));
3327 		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
3328 			type = DDI_INTR_TYPE_MSIX;
3329 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3330 			    "==> hxge_add_intrs: "
3331 			    "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3332 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
3333 			type = DDI_INTR_TYPE_FIXED;
3334 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3335 			    "==> hxge_add_intrs: "
3336 			    "ddi_intr_get_supported_types: FIXED 0x%08x", type));
3337 		}
3338 	}
3339 
3340 	hxgep->hxge_intr_type.intr_type = type;
3341 	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
3342 	    type == DDI_INTR_TYPE_FIXED) &&
3343 	    hxgep->hxge_intr_type.niu_msi_enable) {
3344 		if ((status = hxge_add_intrs_adv(hxgep)) != DDI_SUCCESS) {
3345 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3346 			    " hxge_add_intrs: "
3347 			    " hxge_add_intrs_adv failed: status 0x%08x",
3348 			    status));
3349 			return (status);
3350 		} else {
3351 			HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_intrs: "
3352 			    "interrupts registered : type %d", type));
3353 			hxgep->hxge_intr_type.intr_registered = B_TRUE;
3354 
3355 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
3356 			    "\nAdded advanced hxge add_intr_adv "
3357 			    "intr type 0x%x\n", type));
3358 
3359 			return (status);
3360 		}
3361 	}
3362 
3363 	if (!hxgep->hxge_intr_type.intr_registered) {
3364 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3365 		    "==> hxge_add_intrs: failed to register interrupts"));
3366 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3367 	}
3368 
3369 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs"));
3370 
3371 	return (status);
3372 }
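
/*
 * Editor's note: hxge_msi_enable, consumed above, is a tunable; it can
 * be set in /etc/system with the standard module:variable directive
 * (illustrative lines; a reboot is required for /etc/system changes):
 *
 *	set hxge:hxge_msi_enable = 2	* prefer MSI-X, then MSI, then fixed
 *	set hxge:hxge_msi_enable = 1	* prefer MSI, then MSI-X, then fixed
 *	set hxge:hxge_msi_enable = 0	* fixed (INTx emulation) only
 */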
3373 
3374 /*ARGSUSED*/
3375 static hxge_status_t
3376 hxge_add_soft_intrs(p_hxge_t hxgep)
3377 {
3378 	int		ddi_status = DDI_SUCCESS;
3379 	hxge_status_t	status = HXGE_OK;
3380 
3381 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_soft_intrs"));
3382 
3383 	hxgep->resched_id = NULL;
3384 	hxgep->resched_running = B_FALSE;
3385 	ddi_status = ddi_add_softintr(hxgep->dip, DDI_SOFTINT_LOW,
3386 	    &hxgep->resched_id, NULL, NULL, hxge_reschedule, (caddr_t)hxgep);
3387 	if (ddi_status != DDI_SUCCESS) {
3388 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_soft_intrs: "
3389 		    "ddi_add_softintrs failed: status 0x%08x", ddi_status));
3390 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3391 	}
3392 
3393 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_add_soft_intrs"));
3394 
3395 	return (status);
3396 }
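
/*
 * Editor's sketch: a soft interrupt registered with ddi_add_softintr(),
 * as above, is later raised with ddi_trigger_softintr().  A typical
 * trigger site looks like the fragment below (illustrative only; the
 * driver's actual trigger point is not shown in this file excerpt):
 *
 *	if (!hxgep->resched_running) {
 *		hxgep->resched_running = B_TRUE;
 *		ddi_trigger_softintr(hxgep->resched_id);
 *	}
 */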
3397 
3398 /*ARGSUSED*/
3399 static hxge_status_t
3400 hxge_add_intrs_adv(p_hxge_t hxgep)
3401 {
3402 	int		intr_type;
3403 	p_hxge_intr_t	intrp;
3404 	hxge_status_t	status;
3405 
3406 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv"));
3407 
3408 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3409 	intr_type = intrp->intr_type;
3410 
3411 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv: type 0x%x",
3412 	    intr_type));
3413 
3414 	switch (intr_type) {
3415 	case DDI_INTR_TYPE_MSI:		/* 0x2 */
3416 	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
3417 		status = hxge_add_intrs_adv_type(hxgep, intr_type);
3418 		break;
3419 
3420 	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
3421 		status = hxge_add_intrs_adv_type_fix(hxgep, intr_type);
3422 		break;
3423 
3424 	default:
3425 		status = HXGE_ERROR;
3426 		break;
3427 	}
3428 
3429 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv"));
3430 
3431 	return (status);
3432 }
3433 
3434 /*ARGSUSED*/
3435 static hxge_status_t
3436 hxge_add_intrs_adv_type(p_hxge_t hxgep, uint32_t int_type)
3437 {
3438 	dev_info_t	*dip = hxgep->dip;
3439 	p_hxge_ldg_t	ldgp;
3440 	p_hxge_intr_t	intrp;
3441 	uint_t		*inthandler = NULL;
3442 	void		*arg1, *arg2;
3443 	int		behavior;
3444 	int		nintrs, navail;
3445 	int		nactual, nrequired, nrequest;
3446 	int		inum = 0;
3447 	int		loop = 0;
3448 	int		x, y;
3449 	int		ddi_status = DDI_SUCCESS;
3450 	hxge_status_t	status = HXGE_OK;
3451 
3452 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type"));
3453 
3454 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3455 
3456 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3457 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3458 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3459 		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
3460 		    "nintrs: %d", ddi_status, nintrs));
3461 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3462 	}
3463 
3464 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
3465 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
3466 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3467 		    "ddi_intr_get_navail() failed, status: 0x%x, "
3468 		    "navail: %d", ddi_status, navail));
3469 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3470 	}
3471 
3472 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3473 	    "ddi_intr_get_navail() returned: intr type %d nintrs %d, navail %d",
3474 	    int_type, nintrs, navail));
3475 
3476 	/* PSARC/2007/453 MSI-X interrupt limit override */
3477 	if (int_type == DDI_INTR_TYPE_MSIX) {
3478 		nrequest = hxge_create_msi_property(hxgep);
3479 		if (nrequest < navail) {
3480 			navail = nrequest;
3481 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3482 			    "hxge_add_intrs_adv_type: nintrs %d "
3483 			    "navail %d (nrequest %d)",
3484 			    nintrs, navail, nrequest));
3485 		}
3486 	}
3487 
3488 	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
3489 		/* The MSI vector count must be a power of 2; round down. */
3490 		if ((navail & 16) == 16) {
3491 			navail = 16;
3492 		} else if ((navail & 8) == 8) {
3493 			navail = 8;
3494 		} else if ((navail & 4) == 4) {
3495 			navail = 4;
3496 		} else if ((navail & 2) == 2) {
3497 			navail = 2;
3498 		} else {
3499 			navail = 1;
3500 		}
3501 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3502 		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
3503 		    "navail %d", nintrs, navail));
3504 	}
3505 
3506 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3507 	    "requesting: intr type %d nintrs %d, navail %d",
3508 	    int_type, nintrs, navail));
3509 
3510 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
3511 	    DDI_INTR_ALLOC_NORMAL);
3512 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
3513 	intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP);
3514 
3515 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
3516 	    navail, &nactual, behavior);
3517 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
3518 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3519 		    " ddi_intr_alloc() failed: %d", ddi_status));
3520 		kmem_free(intrp->htable, intrp->intr_size);
3521 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3522 	}
3523 
3524 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3525 	    "ddi_intr_alloc() returned: navail %d nactual %d",
3526 	    navail, nactual));
3527 
3528 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
3529 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
3530 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3531 		    " ddi_intr_get_pri() failed: %d", ddi_status));
3532 		/* Free already allocated interrupts */
3533 		for (y = 0; y < nactual; y++) {
3534 			(void) ddi_intr_free(intrp->htable[y]);
3535 		}
3536 
3537 		kmem_free(intrp->htable, intrp->intr_size);
3538 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3539 	}
3540 
3541 	nrequired = 0;
3542 	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
3543 	if (status != HXGE_OK) {
3544 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3545 		    "hxge_add_intrs_adv_type: hxge_ldgv_init "
3546 		    "failed: 0x%x", status));
3547 		/* Free already allocated interrupts */
3548 		for (y = 0; y < nactual; y++) {
3549 			(void) ddi_intr_free(intrp->htable[y]);
3550 		}
3551 
3552 		kmem_free(intrp->htable, intrp->intr_size);
3553 		return (status);
3554 	}
3555 
3556 	ldgp = hxgep->ldgvp->ldgp;
3557 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3558 	    "After hxge_ldgv_init(): nreq %d nactual %d", nrequired, nactual));
3559 
3560 	if (nactual < nrequired)
3561 		loop = nactual;
3562 	else
3563 		loop = nrequired;
3564 
3565 	for (x = 0; x < loop; x++, ldgp++) {
3566 		ldgp->vector = (uint8_t)x;
3567 		arg1 = ldgp->ldvp;
3568 		arg2 = hxgep;
3569 		if (ldgp->nldvs == 1) {
3570 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
3571 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3572 			    "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
3573 			    "1-1 int handler (entry %d)\n",
3574 			    arg1, arg2, x));
3575 		} else if (ldgp->nldvs > 1) {
3576 			inthandler = (uint_t *)ldgp->sys_intr_handler;
3577 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3578 			    "hxge_add_intrs_adv_type: arg1 $%p arg2 $%p: "
3579 			    "nldvs %d int handler (entry %d)\n",
3580 			    arg1, arg2, ldgp->nldvs, x));
3581 		}
3582 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3583 		    "==> hxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
3584 		    "htable $%p", x, intrp->htable[x]));
3585 
3586 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
3587 		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
3588 		    DDI_SUCCESS) {
3589 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3590 			    "==> hxge_add_intrs_adv_type: failed #%d "
3591 			    "status 0x%x", x, ddi_status));
3592 			for (y = 0; y < intrp->intr_added; y++) {
3593 				(void) ddi_intr_remove_handler(
3594 				    intrp->htable[y]);
3595 			}
3596 
3597 			/* Free already allocated intr */
3598 			for (y = 0; y < nactual; y++) {
3599 				(void) ddi_intr_free(intrp->htable[y]);
3600 			}
3601 			kmem_free(intrp->htable, intrp->intr_size);
3602 
3603 			(void) hxge_ldgv_uninit(hxgep);
3604 
3605 			return (HXGE_ERROR | HXGE_DDI_FAILED);
3606 		}
3607 
3608 		intrp->intr_added++;
3609 	}
3610 	intrp->msi_intx_cnt = nactual;
3611 
3612 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3613 	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
3614 	    navail, nactual, intrp->msi_intx_cnt, intrp->intr_added));
3615 
3616 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
3617 	(void) hxge_intr_ldgv_init(hxgep);
3618 
3619 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type"));
3620 
3621 	return (status);
3622 }
3623 
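/*
 * hxge_add_intrs_adv_type_fix():
 *
 * Allocate fixed (legacy INTx) or MSI interrupts through the DDI, then
 * bind one interrupt handler to each logical device group (LDG) that
 * hxge_ldgv_init() reports as required.  Any failure unwinds the handlers
 * and vectors allocated so far.
 */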
3624 /*ARGSUSED*/
3625 static hxge_status_t
3626 hxge_add_intrs_adv_type_fix(p_hxge_t hxgep, uint32_t int_type)
3627 {
3628 	dev_info_t	*dip = hxgep->dip;
3629 	p_hxge_ldg_t	ldgp;
3630 	p_hxge_intr_t	intrp;
3631 	uint_t		*inthandler;
3632 	void		*arg1, *arg2;
3633 	int		behavior;
3634 	int		nintrs, navail;
3635 	int		nactual, nrequired;
3636 	int		inum = 0;
3637 	int		x, y;
3638 	int		ddi_status = DDI_SUCCESS;
3639 	hxge_status_t	status = HXGE_OK;
3640 
3641 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type_fix"));
3642 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3643 
3644 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3645 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3646 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3647 		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
3648 		    "nintrs: %d", ddi_status, nintrs));
3649 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3650 	}
3651 
3652 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
3653 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
3654 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3655 		    "ddi_intr_get_navail() failed, status: 0x%x, "
3656 		    "navail: %d", ddi_status, navail));
3657 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3658 	}
3659 
3660 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3661 	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
3662 	    nintrs, navail));
3663 
3664 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
3665 	    DDI_INTR_ALLOC_NORMAL);
3666 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
3667 	intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP);
3668 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
3669 	    navail, &nactual, behavior);
3670 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
3671 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3672 		    " ddi_intr_alloc() failed: %d", ddi_status));
3673 		kmem_free(intrp->htable, intrp->intr_size);
3674 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3675 	}
3676 
3677 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
3678 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
3679 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3680 		    " ddi_intr_get_pri() failed: %d", ddi_status));
3681 		/* Free already allocated interrupts */
3682 		for (y = 0; y < nactual; y++) {
3683 			(void) ddi_intr_free(intrp->htable[y]);
3684 		}
3685 
3686 		kmem_free(intrp->htable, intrp->intr_size);
3687 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3688 	}
3689 
3690 	nrequired = 0;
3691 	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
3692 	if (status != HXGE_OK) {
3693 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3694 		    "hxge_add_intrs_adv_type_fix:hxge_ldgv_init "
3695 		    "failed: 0x%x", status));
3696 		/* Free already allocated interrupts */
3697 		for (y = 0; y < nactual; y++) {
3698 			(void) ddi_intr_free(intrp->htable[y]);
3699 		}
3700 
3701 		kmem_free(intrp->htable, intrp->intr_size);
3702 		return (status);
3703 	}
3704 
3705 	ldgp = hxgep->ldgvp->ldgp;
3706 	for (x = 0; x < nrequired; x++, ldgp++) {
3707 		ldgp->vector = (uint8_t)x;
3708 		arg1 = ldgp->ldvp;
3709 		arg2 = hxgep;
3710 		if (ldgp->nldvs == 1) {
3711 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
3712 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3713 			    "hxge_add_intrs_adv_type_fix: "
3714 			    "1-1 int handler(%d) ldg %d ldv %d "
3715 			    "arg1 $%p arg2 $%p\n",
3716 			    x, ldgp->ldg, ldgp->ldvp->ldv, arg1, arg2));
3717 		} else if (ldgp->nldvs > 1) {
3718 			inthandler = (uint_t *)ldgp->sys_intr_handler;
3719 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3720 			    "hxge_add_intrs_adv_type_fix: "
3721 			    "shared int handler(%d) nldvs %d ldg %d ldv %d "
3722 			    "arg1 0x%016llx arg2 0x%016llx\n",
3723 			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
3724 			    arg1, arg2));
3725 		}
3726 
3727 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
3728 		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
3729 		    DDI_SUCCESS) {
3730 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3731 			    "==> hxge_add_intrs_adv_type_fix: failed #%d "
3732 			    "status 0x%x", x, ddi_status));
3733 			for (y = 0; y < intrp->intr_added; y++) {
3734 				(void) ddi_intr_remove_handler(
3735 				    intrp->htable[y]);
3736 			}
3737 			for (y = 0; y < nactual; y++) {
3738 				(void) ddi_intr_free(intrp->htable[y]);
3739 			}
3740 			/* Free already allocated intr */
3741 			kmem_free(intrp->htable, intrp->intr_size);
3742 
3743 			(void) hxge_ldgv_uninit(hxgep);
3744 
3745 			return (HXGE_ERROR | HXGE_DDI_FAILED);
3746 		}
3747 		intrp->intr_added++;
3748 	}
3749 
3750 	intrp->msi_intx_cnt = nactual;
3751 
3752 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
3753 
3754 	status = hxge_intr_ldgv_init(hxgep);
3755 
3756 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type_fix"));
3757 
3758 	return (status);
3759 }
3760 
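/*
 * hxge_remove_intrs():
 *
 * Undo hxge_add_intrs(): disable all vectors (block-disable when the
 * DDI_INTR_FLAG_BLOCK capability is present), remove the handlers, free
 * the vectors and the handle table, and tear down the logical device
 * group state.
 */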
3761 /*ARGSUSED*/
3762 static void
3763 hxge_remove_intrs(p_hxge_t hxgep)
3764 {
3765 	int		i, inum;
3766 	p_hxge_intr_t	intrp;
3767 
3768 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs"));
3769 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3770 	if (!intrp->intr_registered) {
3771 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3772 		    "<== hxge_remove_intrs: interrupts not registered"));
3773 		return;
3774 	}
3775 
3776 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs:advanced"));
3777 
3778 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
3779 		(void) ddi_intr_block_disable(intrp->htable,
3780 		    intrp->intr_added);
3781 	} else {
3782 		for (i = 0; i < intrp->intr_added; i++) {
3783 			(void) ddi_intr_disable(intrp->htable[i]);
3784 		}
3785 	}
3786 
3787 	for (inum = 0; inum < intrp->intr_added; inum++) {
3788 		if (intrp->htable[inum]) {
3789 			(void) ddi_intr_remove_handler(intrp->htable[inum]);
3790 		}
3791 	}
3792 
3793 	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
3794 		if (intrp->htable[inum]) {
3795 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
3796 			    "hxge_remove_intrs: ddi_intr_free inum %d "
3797 			    "msi_intx_cnt %d intr_added %d",
3798 			    inum, intrp->msi_intx_cnt, intrp->intr_added));
3799 
3800 			(void) ddi_intr_free(intrp->htable[inum]);
3801 		}
3802 	}
3803 
3804 	kmem_free(intrp->htable, intrp->intr_size);
3805 	intrp->intr_registered = B_FALSE;
3806 	intrp->intr_enabled = B_FALSE;
3807 	intrp->msi_intx_cnt = 0;
3808 	intrp->intr_added = 0;
3809 
3810 	(void) hxge_ldgv_uninit(hxgep);
3811 
3812 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_intrs"));
3813 }
3814 
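/*
 * hxge_remove_soft_intrs():
 *
 * Remove the soft interrupt (resched_id) registered by
 * hxge_add_soft_intrs(), if any.
 */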
3815 /*ARGSUSED*/
3816 static void
3817 hxge_remove_soft_intrs(p_hxge_t hxgep)
3818 {
3819 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_soft_intrs"));
3820 
3821 	if (hxgep->resched_id) {
3822 		ddi_remove_softintr(hxgep->resched_id);
3823 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3824 		    "==> hxge_remove_soft_intrs: removed"));
3825 		hxgep->resched_id = NULL;
3826 	}
3827 
3828 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_soft_intrs"));
3829 }
3830 
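/*
 * hxge_intrs_enable():
 *
 * Enable all registered interrupt vectors, using a single
 * ddi_intr_block_enable() when the nexus supports block operations and
 * falling back to per-vector ddi_intr_enable() otherwise.
 */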
3831 /*ARGSUSED*/
3832 void
3833 hxge_intrs_enable(p_hxge_t hxgep)
3834 {
3835 	p_hxge_intr_t	intrp;
3836 	int		i;
3837 	int		status;
3838 
3839 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable"));
3840 
3841 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3842 
3843 	if (!intrp->intr_registered) {
3844 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_intrs_enable: "
3845 		    "interrupts are not registered"));
3846 		return;
3847 	}
3848 
3849 	if (intrp->intr_enabled) {
3850 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3851 		    "<== hxge_intrs_enable: already enabled"));
3852 		return;
3853 	}
3854 
3855 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
3856 		status = ddi_intr_block_enable(intrp->htable,
3857 		    intrp->intr_added);
3858 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
3859 		    "block enable - status 0x%x total inums #%d\n",
3860 		    status, intrp->intr_added));
3861 	} else {
3862 		for (i = 0; i < intrp->intr_added; i++) {
3863 			status = ddi_intr_enable(intrp->htable[i]);
3864 			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
3865 			    "ddi_intr_enable:enable - status 0x%x "
3866 			    "total inums %d enable inum #%d\n",
3867 			    status, intrp->intr_added, i));
3868 			if (status == DDI_SUCCESS) {
3869 				intrp->intr_enabled = B_TRUE;
3870 			}
3871 		}
3872 	}
3873 
3874 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_enable"));
3875 }
3876 
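/*
 * hxge_intrs_disable():
 *
 * Disable all registered interrupt vectors; the mirror image of
 * hxge_intrs_enable().
 */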
3877 /*ARGSUSED*/
3878 static void
3879 hxge_intrs_disable(p_hxge_t hxgep)
3880 {
3881 	p_hxge_intr_t	intrp;
3882 	int		i;
3883 
3884 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_disable"));
3885 
3886 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3887 
3888 	if (!intrp->intr_registered) {
3889 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable: "
3890 		    "interrupts are not registered"));
3891 		return;
3892 	}
3893 
3894 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
3895 		(void) ddi_intr_block_disable(intrp->htable,
3896 		    intrp->intr_added);
3897 	} else {
3898 		for (i = 0; i < intrp->intr_added; i++) {
3899 			(void) ddi_intr_disable(intrp->htable[i]);
3900 		}
3901 	}
3902 
3903 	intrp->intr_enabled = B_FALSE;
3904 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable"));
3905 }
3906 
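/*
 * hxge_mac_register():
 *
 * Register this instance with the GLDv3 MAC framework: allocate a
 * mac_register_t, fill in the driver callbacks, MAC address, SDU limits
 * and private properties, then hand it to mac_register().
 */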
3907 static hxge_status_t
3908 hxge_mac_register(p_hxge_t hxgep)
3909 {
3910 	mac_register_t	*macp;
3911 	int		status;
3912 
3913 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_mac_register"));
3914 
3915 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
3916 		return (HXGE_ERROR);
3917 
3918 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
3919 	macp->m_driver = hxgep;
3920 	macp->m_dip = hxgep->dip;
3921 	macp->m_src_addr = hxgep->ouraddr.ether_addr_octet;
3922 
3923 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
3924 	    "hxge_mac_register: ether addr is %x:%x:%x:%x:%x:%x",
3925 	    macp->m_src_addr[0],
3926 	    macp->m_src_addr[1],
3927 	    macp->m_src_addr[2],
3928 	    macp->m_src_addr[3],
3929 	    macp->m_src_addr[4],
3930 	    macp->m_src_addr[5]));
3931 
3932 	macp->m_callbacks = &hxge_m_callbacks;
3933 	macp->m_min_sdu = 0;
3934 	macp->m_max_sdu = hxgep->vmac.maxframesize - MTU_TO_FRAME_SIZE;
3935 	macp->m_margin = VLAN_TAGSZ;
3936 	macp->m_priv_props = hxge_priv_props;
3937 	macp->m_priv_prop_count = HXGE_MAX_PRIV_PROPS;
3938 
3939 	status = mac_register(macp, &hxgep->mach);
3940 	mac_free(macp);
3941 
3942 	if (status != 0) {
3943 		cmn_err(CE_WARN,
3944 		    "hxge_mac_register failed (status %d instance %d)",
3945 		    status, hxgep->instance);
3946 		return (HXGE_ERROR);
3947 	}
3948 
3949 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_mac_register success "
3950 	    "(instance %d)", hxgep->instance));
3951 
3952 	return (HXGE_OK);
3953 }
3954 
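/*
 * hxge_init_common_dev():
 *
 * Attach this instance to the per-Hydra shared hardware list.  The list
 * is keyed by the parent devinfo node; if an entry already exists, its
 * reference count is bumped, otherwise a new entry (and its
 * configuration, TCAM and VLAN locks) is created and linked in.
 */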
3955 static int
3956 hxge_init_common_dev(p_hxge_t hxgep)
3957 {
3958 	p_hxge_hw_list_t	hw_p;
3959 	dev_info_t		*p_dip;
3960 
3961 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_init_common_dev"));
3962 
3963 	p_dip = hxgep->p_dip;
3964 	MUTEX_ENTER(&hxge_common_lock);
3965 
3966 	/*
3967 	 * Loop through existing per Hydra hardware list.
3968 	 */
3969 	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
3970 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
3971 		    "==> hxge_init_common_dev: hw_p $%p parent dip $%p",
3972 		    hw_p, p_dip));
3973 		if (hw_p->parent_devp == p_dip) {
3974 			hxgep->hxge_hw_p = hw_p;
3975 			hw_p->ndevs++;
3976 			hw_p->hxge_p = hxgep;
3977 			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
3978 			    "==> hxge_init_common_dev: "
3979 			    "hw_p $%p parent dip $%p ndevs %d (found)",
3980 			    hw_p, p_dip, hw_p->ndevs));
3981 			break;
3982 		}
3983 	}
3984 
3985 	if (hw_p == NULL) {
3986 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
3987 		    "==> hxge_init_common_dev: parent dip $%p (new)", p_dip));
3988 		hw_p = kmem_zalloc(sizeof (hxge_hw_list_t), KM_SLEEP);
3989 		hw_p->parent_devp = p_dip;
3990 		hw_p->magic = HXGE_MAGIC;
3991 		hxgep->hxge_hw_p = hw_p;
3992 		hw_p->ndevs++;
3993 		hw_p->hxge_p = hxgep;
3994 		hw_p->next = hxge_hw_list;
3995 
3996 		MUTEX_INIT(&hw_p->hxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
3997 		MUTEX_INIT(&hw_p->hxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
3998 		MUTEX_INIT(&hw_p->hxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
3999 
4000 		hxge_hw_list = hw_p;
4001 	}
4002 	MUTEX_EXIT(&hxge_common_lock);
4003 	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4004 	    "==> hxge_init_common_dev (hxge_hw_list) $%p", hxge_hw_list));
4005 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_init_common_dev"));
4006 
4007 	return (HXGE_OK);
4008 }
4009 
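/*
 * hxge_uninit_common_dev():
 *
 * Drop this instance's reference on the shared hardware list entry; when
 * the last reference goes away, destroy the entry's locks, unlink it
 * from hxge_hw_list and free it.
 */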
4010 static void
4011 hxge_uninit_common_dev(p_hxge_t hxgep)
4012 {
4013 	p_hxge_hw_list_t	hw_p, h_hw_p;
4014 	dev_info_t		*p_dip;
4015 
4016 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_uninit_common_dev"));
4017 	if (hxgep->hxge_hw_p == NULL) {
4018 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4019 		    "<== hxge_uninit_common_dev (no common)"));
4020 		return;
4021 	}
4022 
4023 	MUTEX_ENTER(&hxge_common_lock);
4024 	h_hw_p = hxge_hw_list;
4025 	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
4026 		p_dip = hw_p->parent_devp;
4027 		if (hxgep->hxge_hw_p == hw_p && p_dip == hxgep->p_dip &&
4028 		    hxgep->hxge_hw_p->magic == HXGE_MAGIC &&
4029 		    hw_p->magic == HXGE_MAGIC) {
4030 			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4031 			    "==> hxge_uninit_common_dev: "
4032 			    "hw_p $%p parent dip $%p ndevs %d (found)",
4033 			    hw_p, p_dip, hw_p->ndevs));
4034 
4035 			hxgep->hxge_hw_p = NULL;
4036 			if (hw_p->ndevs) {
4037 				hw_p->ndevs--;
4038 			}
4039 			hw_p->hxge_p = NULL;
4040 			if (!hw_p->ndevs) {
4041 				MUTEX_DESTROY(&hw_p->hxge_vlan_lock);
4042 				MUTEX_DESTROY(&hw_p->hxge_tcam_lock);
4043 				MUTEX_DESTROY(&hw_p->hxge_cfg_lock);
4044 				HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4045 				    "==> hxge_uninit_common_dev: "
4046 				    "hw_p $%p parent dip $%p ndevs %d (last)",
4047 				    hw_p, p_dip, hw_p->ndevs));
4048 
4049 				if (hw_p == hxge_hw_list) {
4050 					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4051 					    "==> hxge_uninit_common_dev:"
4052 					    "remove head "
4053 					    "hw_p $%p parent dip $%p "
4054 					    "ndevs %d (head)",
4055 					    hw_p, p_dip, hw_p->ndevs));
4056 					hxge_hw_list = hw_p->next;
4057 				} else {
4058 					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4059 					    "==> hxge_uninit_common_dev:"
4060 					    "remove middle "
4061 					    "hw_p $%p parent dip $%p "
4062 					    "ndevs %d (middle)",
4063 					    hw_p, p_dip, hw_p->ndevs));
4064 					h_hw_p->next = hw_p->next;
4065 				}
4066 
4067 				KMEM_FREE(hw_p, sizeof (hxge_hw_list_t));
4068 			}
4069 			break;
4070 		} else {
4071 			h_hw_p = hw_p;
4072 		}
4073 	}
4074 
4075 	MUTEX_EXIT(&hxge_common_lock);
4076 	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4077 	    "==> hxge_uninit_common_dev (hxge_hw_list) $%p", hxge_hw_list));
4078 
4079 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_uninit_common_dev"));
4080 }
4081 
4082 #define	HXGE_MSIX_ENTRIES		32
4083 #define	HXGE_MSIX_WAIT_COUNT		10
4084 #define	HXGE_MSIX_PARITY_CHECK_COUNT	30
4085 
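/*
 * hxge_link_poll():
 *
 * timeout(9F) callback: read CIP_LINK_STAT and report any XPCS0
 * link-state change to the MAC layer, verify one MSI-X table entry every
 * HXGE_MSIX_PARITY_CHECK_COUNT invocations, then rearm itself.
 */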
4086 static void
4087 hxge_link_poll(void *arg)
4088 {
4089 	p_hxge_t		hxgep = (p_hxge_t)arg;
4090 	hpi_handle_t		handle;
4091 	cip_link_stat_t		link_stat;
4092 	hxge_timeout		*to = &hxgep->timeout;
4093 
4094 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
4095 	HXGE_REG_RD32(handle, CIP_LINK_STAT, &link_stat.value);
4096 
4097 	if (to->report_link_status ||
4098 	    (to->link_status != link_stat.bits.xpcs0_link_up)) {
4099 		to->link_status = link_stat.bits.xpcs0_link_up;
4100 		to->report_link_status = B_FALSE;
4101 
4102 		if (link_stat.bits.xpcs0_link_up) {
4103 			hxge_link_update(hxgep, LINK_STATE_UP);
4104 		} else {
4105 			hxge_link_update(hxgep, LINK_STATE_DOWN);
4106 		}
4107 	}
4108 
4109 	if (hxgep->msix_count++ >= HXGE_MSIX_PARITY_CHECK_COUNT) {
4110 		hxgep->msix_count = 0;
4111 		hxgep->msix_index++;
4112 		if (hxgep->msix_index >= HXGE_MSIX_ENTRIES)
4113 			hxgep->msix_index = 0;
4114 		hxge_check_1entry_msix_table(hxgep, hxgep->msix_index);
4115 	}
4116 
4117 	/* Rearm the timer to poll the link status again */
4118 	MUTEX_ENTER(&to->lock);
4119 	to->id = timeout(hxge_link_poll, arg, to->ticks);
4120 	MUTEX_EXIT(&to->lock);
4121 }
4122 
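/*
 * hxge_link_update():
 *
 * Push the new link state up to the MAC layer and keep the link
 * speed/duplex/up statistics in sync (the link is always reported as
 * 10 Gbps full duplex when up).
 */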
4123 static void
4124 hxge_link_update(p_hxge_t hxgep, link_state_t state)
4125 {
4126 	p_hxge_stats_t		statsp = (p_hxge_stats_t)hxgep->statsp;
4127 
4128 	mac_link_update(hxgep->mach, state);
4129 	if (state == LINK_STATE_UP) {
4130 		statsp->mac_stats.link_speed = 10000;
4131 		statsp->mac_stats.link_duplex = 2;
4132 		statsp->mac_stats.link_up = 1;
4133 	} else {
4134 		statsp->mac_stats.link_speed = 0;
4135 		statsp->mac_stats.link_duplex = 0;
4136 		statsp->mac_stats.link_up = 0;
4137 	}
4138 }
4139 
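/*
 * hxge_msix_init():
 *
 * Prime the MSI-X table through the MSI-X BAR: write a distinct pattern
 * into the first three words of each of the 32 entries, then read every
 * entry back so the RAM data-out buffer is in a known state.
 */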
4140 static void
4141 hxge_msix_init(p_hxge_t hxgep)
4142 {
4143 	uint32_t 		data0;
4144 	uint32_t 		data1;
4145 	uint32_t 		data2;
4146 	int			i;
4147 	uint32_t		msix_entry0;
4148 	uint32_t		msix_entry1;
4149 	uint32_t		msix_entry2;
4150 	uint32_t		msix_entry3;
4151 
4152 	/* Use the MSI-X BAR directly instead of indirect access */
4153 	for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
4154 		data0 = 0xffffffff - i;
4155 		data1 = 0xffffffff - i - 1;
4156 		data2 = 0xffffffff - i - 2;
4157 
4158 		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16, data0);
4159 		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 4, data1);
4160 		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 8, data2);
4161 	}
4162 
4163 	/* Read every entry back to initialize the RAM data-out buffer. */
4164 	for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
4165 		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16, &msix_entry0);
4166 		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 4, &msix_entry1);
4167 		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 8, &msix_entry2);
4168 		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 12, &msix_entry3);
4169 	}
4170 }
4171 
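/*
 * hxge_store_msix_table():
 *
 * Snapshot the first three words of every MSI-X table entry into
 * msix_table[] so hxge_check_1entry_msix_table() can later compare
 * against them.
 */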
4172 static void
4173 hxge_store_msix_table(p_hxge_t hxgep)
4174 {
4175 	int			i;
4176 	uint32_t		msix_entry0;
4177 	uint32_t		msix_entry1;
4178 	uint32_t		msix_entry2;
4179 
4180 	for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
4181 		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16, &msix_entry0);
4182 		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 4,
4183 		    &msix_entry1);
4184 		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 8,
4185 		    &msix_entry2);
4186 
4187 		hxgep->msix_table[i][0] = msix_entry0;
4188 		hxgep->msix_table[i][1] = msix_entry1;
4189 		hxgep->msix_table[i][2] = msix_entry2;
4190 	}
4191 }
4192 
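/*
 * hxge_check_1entry_msix_table():
 *
 * Re-read MSI-X table entry i and compare it with the stored copy.  A
 * mismatch is counted as an EIC MSI-X parity error; the first occurrence
 * is logged and reported through FMA.
 */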
4193 static void
4194 hxge_check_1entry_msix_table(p_hxge_t hxgep, int i)
4195 {
4196 	uint32_t		msix_entry0;
4197 	uint32_t		msix_entry1;
4198 	uint32_t		msix_entry2;
4199 	p_hxge_peu_sys_stats_t	statsp;
4200 
4201 	statsp = (p_hxge_peu_sys_stats_t)&hxgep->statsp->peu_sys_stats;
4202 
4203 	HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16, &msix_entry0);
4204 	HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 4, &msix_entry1);
4205 	HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 8, &msix_entry2);
4206 
4207 	hxgep->msix_table_check[i][0] = msix_entry0;
4208 	hxgep->msix_table_check[i][1] = msix_entry1;
4209 	hxgep->msix_table_check[i][2] = msix_entry2;
4210 
4211 	if ((hxgep->msix_table[i][0] != hxgep->msix_table_check[i][0]) ||
4212 	    (hxgep->msix_table[i][1] != hxgep->msix_table_check[i][1]) ||
4213 	    (hxgep->msix_table[i][2] != hxgep->msix_table_check[i][2])) {
4214 		statsp->eic_msix_parerr++;
4215 		if (statsp->eic_msix_parerr == 1) {
4216 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4217 			    "==> hxge_check_1entry_msix_table: "
4218 			    "eic_msix_parerr at index: %d", i));
4219 			HXGE_FM_REPORT_ERROR(hxgep, NULL,
4220 			    HXGE_FM_EREPORT_PEU_ERR);
4221 		}
4222 	}
4223 }
4224 
4225 /*
4226  * The following function supports the
4227  * PSARC/2007/453 MSI-X interrupt limit override.
4228  */
4229 static int
4230 hxge_create_msi_property(p_hxge_t hxgep)
4231 {
4232 	int	nmsi;
4233 	extern	int ncpus;
4234 
4235 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_create_msi_property"));
4236 
4237 	(void) ddi_prop_create(DDI_DEV_T_NONE, hxgep->dip,
4238 	    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
4239 	/*
4240 	 * Request at most HXGE_MSIX_REQUEST_10G (8) MSI-X vectors;
4241 	 * if there are fewer CPUs than that, request one vector
4242 	 * per CPU.
4243 	 */
4244 	if (ncpus >= HXGE_MSIX_REQUEST_10G) {
4245 		nmsi = HXGE_MSIX_REQUEST_10G;
4246 	} else {
4247 		nmsi = ncpus;
4248 	}
4249 
4250 	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4251 	    "==> hxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
4252 	    ddi_prop_exists(DDI_DEV_T_NONE, hxgep->dip,
4253 	    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
4254 
4255 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_create_msi_property"));
4256 	return (nmsi);
4257 }
4258