xref: /illumos-gate/usr/src/uts/common/io/hxge/hxge_main.c (revision 46b592853d0f4f11781b6b0a7533f267c6aee132)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * SunOS MT STREAMS Hydra 10Gb Ethernet Device Driver.
28  */
29 #include <hxge_impl.h>
30 #include <hxge_pfc.h>
31 
32 /*
33  * PSARC/2007/453 MSI-X interrupt limit override
34  * (This PSARC case is limited to MSI-X vectors
35  *  and SPARC platforms only).
36  */
37 uint32_t hxge_msi_enable = 2;
38 
39 /*
40  * Globals: tunable parameters (/etc/system or adb)
41  *
42  */
43 uint32_t hxge_rbr_size = HXGE_RBR_RBB_DEFAULT;
44 uint32_t hxge_rbr_spare_size = 0;
45 uint32_t hxge_rcr_size = HXGE_RCR_DEFAULT;
46 uint32_t hxge_tx_ring_size = HXGE_TX_RING_DEFAULT;
47 uint32_t hxge_bcopy_thresh = TX_BCOPY_MAX;
48 uint32_t hxge_dvma_thresh = TX_FASTDVMA_MIN;
49 uint32_t hxge_dma_stream_thresh = TX_STREAM_MIN;
50 uint32_t hxge_jumbo_frame_size = MAX_FRAME_SIZE;
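/*
 * Any of these can be overridden at boot time via /etc/system, for
 * example (values shown are illustrative only):
 *	set hxge:hxge_rbr_size = 0x800
 *	set hxge:hxge_jumbo_frame_size = 0x2400
 */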
51 
52 static hxge_os_mutex_t hxgedebuglock;
53 static int hxge_debug_init = 0;
54 
55 /*
56  * Debugging flags:
57  *		hxge_no_tx_lb : when set, disable transmit load balancing
58  *		hxge_tx_lb_policy: 0 - TCP/UDP port (default)
59  *				   1 - From the Stack
60  *				   2 - Destination IP Address
61  */
62 uint32_t hxge_no_tx_lb = 0;
63 uint32_t hxge_tx_lb_policy = HXGE_TX_LB_TCPUDP;
64 
65 /*
66  * Tunables to manage the receive buffer blocks.
67  *
68  * hxge_rx_threshold_hi: copy all buffers.
69  * hxge_rx_buf_size_type: receive buffer block size type.
70  * hxge_rx_threshold_lo: copy only up to tunable block size type.
71  */
72 hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_NONE;
73 hxge_rxbuf_type_t hxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
74 hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_NONE;
75 
76 rtrace_t hpi_rtracebuf;
77 
78 /*
79  * Function Prototypes
80  */
81 static int hxge_attach(dev_info_t *, ddi_attach_cmd_t);
82 static int hxge_detach(dev_info_t *, ddi_detach_cmd_t);
83 static void hxge_unattach(p_hxge_t);
84 
85 static hxge_status_t hxge_setup_system_dma_pages(p_hxge_t);
86 
87 static hxge_status_t hxge_setup_mutexes(p_hxge_t);
88 static void hxge_destroy_mutexes(p_hxge_t);
89 
90 static hxge_status_t hxge_map_regs(p_hxge_t hxgep);
91 static void hxge_unmap_regs(p_hxge_t hxgep);
92 
93 hxge_status_t hxge_add_intrs(p_hxge_t hxgep);
94 static void hxge_remove_intrs(p_hxge_t hxgep);
95 static hxge_status_t hxge_add_intrs_adv(p_hxge_t hxgep);
96 static hxge_status_t hxge_add_intrs_adv_type(p_hxge_t, uint32_t);
97 static hxge_status_t hxge_add_intrs_adv_type_fix(p_hxge_t, uint32_t);
98 void hxge_intrs_enable(p_hxge_t hxgep);
99 static void hxge_intrs_disable(p_hxge_t hxgep);
100 static void hxge_suspend(p_hxge_t);
101 static hxge_status_t hxge_resume(p_hxge_t);
102 hxge_status_t hxge_setup_dev(p_hxge_t);
103 static void hxge_destroy_dev(p_hxge_t);
104 hxge_status_t hxge_alloc_mem_pool(p_hxge_t);
105 static void hxge_free_mem_pool(p_hxge_t);
106 static hxge_status_t hxge_alloc_rx_mem_pool(p_hxge_t);
107 static void hxge_free_rx_mem_pool(p_hxge_t);
108 static hxge_status_t hxge_alloc_tx_mem_pool(p_hxge_t);
109 static void hxge_free_tx_mem_pool(p_hxge_t);
110 static hxge_status_t hxge_dma_mem_alloc(p_hxge_t, dma_method_t,
111     struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
112     p_hxge_dma_common_t);
113 static void hxge_dma_mem_free(p_hxge_dma_common_t);
114 static hxge_status_t hxge_alloc_rx_buf_dma(p_hxge_t, uint16_t,
115     p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
116 static void hxge_free_rx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
117 static hxge_status_t hxge_alloc_rx_cntl_dma(p_hxge_t, uint16_t,
118     p_hxge_dma_common_t *, struct ddi_dma_attr *, size_t);
119 static void hxge_free_rx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
120 static hxge_status_t hxge_alloc_tx_buf_dma(p_hxge_t, uint16_t,
121     p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
122 static void hxge_free_tx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
123 static hxge_status_t hxge_alloc_tx_cntl_dma(p_hxge_t, uint16_t,
124     p_hxge_dma_common_t *, size_t);
125 static void hxge_free_tx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
126 static int hxge_init_common_dev(p_hxge_t);
127 static void hxge_uninit_common_dev(p_hxge_t);
128 
129 /*
130  * The next declarations are for the GLDv3 interface.
131  */
132 static int hxge_m_start(void *);
133 static void hxge_m_stop(void *);
134 static int hxge_m_multicst(void *, boolean_t, const uint8_t *);
135 static int hxge_m_promisc(void *, boolean_t);
136 static void hxge_m_ioctl(void *, queue_t *, mblk_t *);
137 static hxge_status_t hxge_mac_register(p_hxge_t hxgep);
138 
139 static boolean_t hxge_m_getcapab(void *, mac_capab_t, void *);
140 static boolean_t hxge_param_locked(mac_prop_id_t pr_num);
141 static int hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
142     uint_t pr_valsize, const void *pr_val);
143 static int hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
144     uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *);
145 static int hxge_get_def_val(hxge_t *hxgep, mac_prop_id_t pr_num,
146     uint_t pr_valsize, void *pr_val);
147 static int hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name,
148     uint_t pr_valsize, const void *pr_val);
149 static int hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name,
150     uint_t pr_flags, uint_t pr_valsize, void *pr_val);
151 static void hxge_link_poll(void *arg);
152 static void hxge_link_update(p_hxge_t hxge, link_state_t state);
153 static void hxge_msix_init(p_hxge_t hxgep);
154 static void hxge_store_msix_table(p_hxge_t hxgep);
155 static void hxge_check_1entry_msix_table(p_hxge_t hxgep, int msix_index);
156 
157 mac_priv_prop_t hxge_priv_props[] = {
158 	{"_rxdma_intr_time", MAC_PROP_PERM_RW},
159 	{"_rxdma_intr_pkts", MAC_PROP_PERM_RW},
160 	{"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW},
161 	{"_class_opt_ipv4_udp", MAC_PROP_PERM_RW},
162 	{"_class_opt_ipv4_ah", MAC_PROP_PERM_RW},
163 	{"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW},
164 	{"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW},
165 	{"_class_opt_ipv6_udp", MAC_PROP_PERM_RW},
166 	{"_class_opt_ipv6_ah", MAC_PROP_PERM_RW},
167 	{"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW}
168 };
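/*
 * These driver-private properties can typically be tuned at run time
 * with dladm(1M) set-linkprop, for example (illustrative value):
 *	dladm set-linkprop -p _rxdma_intr_pkts=32 hxge0
 */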
169 
170 #define	HXGE_MAX_PRIV_PROPS	\
171 	(sizeof (hxge_priv_props)/sizeof (mac_priv_prop_t))
172 
173 #define	HXGE_MAGIC	0x4E584745UL
174 #define	MAX_DUMP_SZ 256
175 
176 #define	HXGE_M_CALLBACK_FLAGS	\
177 	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)
178 
179 extern hxge_status_t hxge_pfc_set_default_mac_addr(p_hxge_t hxgep);
180 
181 static mac_callbacks_t hxge_m_callbacks = {
182 	HXGE_M_CALLBACK_FLAGS,
183 	hxge_m_stat,
184 	hxge_m_start,
185 	hxge_m_stop,
186 	hxge_m_promisc,
187 	hxge_m_multicst,
188 	NULL,
189 	NULL,
190 	hxge_m_ioctl,
191 	hxge_m_getcapab,
192 	NULL,
193 	NULL,
194 	hxge_m_setprop,
195 	hxge_m_getprop
196 };
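/*
 * The NULL slots above are optional GLDv3 entry points this driver does
 * not supply; HXGE_M_CALLBACK_FLAGS tells the MAC layer which optional
 * callbacks (ioctl, getcapab, setprop, getprop) are present.
 */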
197 
198 /* PSARC/2007/453 MSI-X interrupt limit override. */
199 #define	HXGE_MSIX_REQUEST_10G	8
200 static int hxge_create_msi_property(p_hxge_t);
201 
202 /* Enable debug messages as necessary. */
203 uint64_t hxge_debug_level = 0;
204 
205 /*
206  * This list contains the instance structures for the Hydra
207  * devices present in the system. The lock exists to guarantee
208  * mutually exclusive access to the list.
209  */
210 void *hxge_list = NULL;
211 void *hxge_hw_list = NULL;
212 hxge_os_mutex_t hxge_common_lock;
213 
214 extern uint64_t hpi_debug_level;
215 
216 extern hxge_status_t hxge_ldgv_init();
217 extern hxge_status_t hxge_ldgv_uninit();
218 extern hxge_status_t hxge_intr_ldgv_init();
219 extern void hxge_fm_init(p_hxge_t hxgep, ddi_device_acc_attr_t *reg_attr,
220     ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr);
221 extern void hxge_fm_fini(p_hxge_t hxgep);
222 
223 /*
224  * Count used to maintain the number of buffers being used
225  * by Hydra instances and loaned up to the upper layers.
226  */
227 uint32_t hxge_mblks_pending = 0;
228 
229 /*
230  * Device register access attributes for PIO.
231  */
232 static ddi_device_acc_attr_t hxge_dev_reg_acc_attr = {
233 	DDI_DEVICE_ATTR_V0,
234 	DDI_STRUCTURE_LE_ACC,
235 	DDI_STRICTORDER_ACC,
236 };
237 
238 /*
239  * Device descriptor access attributes for DMA.
240  */
241 static ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr = {
242 	DDI_DEVICE_ATTR_V0,
243 	DDI_STRUCTURE_LE_ACC,
244 	DDI_STRICTORDER_ACC
245 };
246 
247 /*
248  * Device buffer access attributes for DMA.
249  */
250 static ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr = {
251 	DDI_DEVICE_ATTR_V0,
252 	DDI_STRUCTURE_BE_ACC,
253 	DDI_STRICTORDER_ACC
254 };
255 
256 ddi_dma_attr_t hxge_rx_rcr_desc_dma_attr = {
257 	DMA_ATTR_V0,		/* version number. */
258 	0,			/* low address */
259 	0xffffffffffffffff,	/* high address */
260 	0xffffffffffffffff,	/* address counter max */
261 	0x80000,		/* alignment */
262 	0xfc00fc,		/* dlim_burstsizes */
263 	0x1,			/* minimum transfer size */
264 	0xffffffffffffffff,	/* maximum transfer size */
265 	0xffffffffffffffff,	/* maximum segment size */
266 	1,			/* scatter/gather list length */
267 	(unsigned int)1,	/* granularity */
268 	0			/* attribute flags */
269 };
270 
271 ddi_dma_attr_t hxge_tx_desc_dma_attr = {
272 	DMA_ATTR_V0,		/* version number. */
273 	0,			/* low address */
274 	0xffffffffffffffff,	/* high address */
275 	0xffffffffffffffff,	/* address counter max */
276 	0x100000,		/* alignment */
277 	0xfc00fc,		/* dlim_burstsizes */
278 	0x1,			/* minimum transfer size */
279 	0xffffffffffffffff,	/* maximum transfer size */
280 	0xffffffffffffffff,	/* maximum segment size */
281 	1,			/* scatter/gather list length */
282 	(unsigned int)1,	/* granularity */
283 	0			/* attribute flags */
284 };
285 
286 ddi_dma_attr_t hxge_rx_rbr_desc_dma_attr = {
287 	DMA_ATTR_V0,		/* version number. */
288 	0,			/* low address */
289 	0xffffffffffffffff,	/* high address */
290 	0xffffffffffffffff,	/* address counter max */
291 	0x40000,		/* alignment */
292 	0xfc00fc,		/* dlim_burstsizes */
293 	0x1,			/* minimum transfer size */
294 	0xffffffffffffffff,	/* maximum transfer size */
295 	0xffffffffffffffff,	/* maximum segment size */
296 	1,			/* scatter/gather list length */
297 	(unsigned int)1,	/* granularity */
298 	0			/* attribute flags */
299 };
300 
301 ddi_dma_attr_t hxge_rx_mbox_dma_attr = {
302 	DMA_ATTR_V0,		/* version number. */
303 	0,			/* low address */
304 	0xffffffffffffffff,	/* high address */
305 	0xffffffffffffffff,	/* address counter max */
306 #if defined(_BIG_ENDIAN)
307 	0x2000,			/* alignment */
308 #else
309 	0x1000,			/* alignment */
310 #endif
311 	0xfc00fc,		/* dlim_burstsizes */
312 	0x1,			/* minimum transfer size */
313 	0xffffffffffffffff,	/* maximum transfer size */
314 	0xffffffffffffffff,	/* maximum segment size */
315 	5,			/* scatter/gather list length */
316 	(unsigned int)1,	/* granularity */
317 	0			/* attribute flags */
318 };
319 
320 ddi_dma_attr_t hxge_tx_dma_attr = {
321 	DMA_ATTR_V0,		/* version number. */
322 	0,			/* low address */
323 	0xffffffffffffffff,	/* high address */
324 	0xffffffffffffffff,	/* address counter max */
325 #if defined(_BIG_ENDIAN)
326 	0x2000,			/* alignment */
327 #else
328 	0x1000,			/* alignment */
329 #endif
330 	0xfc00fc,		/* dlim_burstsizes */
331 	0x1,			/* minimum transfer size */
332 	0xffffffffffffffff,	/* maximum transfer size */
333 	0xffffffffffffffff,	/* maximum segment size */
334 	5,			/* scatter/gather list length */
335 	(unsigned int)1,	/* granularity */
336 	0			/* attribute flags */
337 };
338 
339 ddi_dma_attr_t hxge_rx_dma_attr = {
340 	DMA_ATTR_V0,		/* version number. */
341 	0,			/* low address */
342 	0xffffffffffffffff,	/* high address */
343 	0xffffffffffffffff,	/* address counter max */
344 	0x10000,		/* alignment */
345 	0xfc00fc,		/* dlim_burstsizes */
346 	0x1,			/* minimum transfer size */
347 	0xffffffffffffffff,	/* maximum transfer size */
348 	0xffffffffffffffff,	/* maximum segment size */
349 	1,			/* scatter/gather list length */
350 	(unsigned int)1,	/* granularity */
351 	DDI_DMA_RELAXED_ORDERING /* attribute flags */
352 };
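/*
 * Note: unlike the descriptor and mailbox attributes above, receive data
 * buffers are mapped with DDI_DMA_RELAXED_ORDERING, which permits the
 * device to use PCIe relaxed-ordered writes for packet payload.
 */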
353 
354 ddi_dma_lim_t hxge_dma_limits = {
355 	(uint_t)0,		/* dlim_addr_lo */
356 	(uint_t)0xffffffff,	/* dlim_addr_hi */
357 	(uint_t)0xffffffff,	/* dlim_cntr_max */
358 	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
359 	0x1,			/* dlim_minxfer */
360 	1024			/* dlim_speed */
361 };
362 
363 dma_method_t hxge_force_dma = DVMA;
364 
365 /*
366  * DMA chunk sizes.
367  *
368  * Try to allocate the largest possible size
369  * so that fewer DMA chunks need to be managed.
370  */
371 size_t alloc_sizes[] = {
372     0x1000, 0x2000, 0x4000, 0x8000,
373     0x10000, 0x20000, 0x40000, 0x80000,
374     0x100000, 0x200000, 0x400000, 0x800000, 0x1000000
375 };
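/*
 * For example, a 4K (0x1000) receive block size with a 2048-entry RBR
 * needs 8 MB (0x800000) of buffer memory, which the allocation code below
 * can satisfy with a single chunk from the largest entry in this table.
 */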
376 
377 /*
378  * hxge_attach: attach or resume (DDI_RESUME/DDI_PM_RESUME) a device instance.
379  */
380 static int
381 hxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
382 {
383 	p_hxge_t	hxgep = NULL;
384 	int		instance;
385 	int		status = DDI_SUCCESS;
386 	int		i;
387 
388 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_attach"));
389 
390 	/*
391 	 * Get the device instance since we'll need to set up or retrieve a soft
392 	 * state for this instance.
393 	 */
394 	instance = ddi_get_instance(dip);
395 
396 	switch (cmd) {
397 	case DDI_ATTACH:
398 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_ATTACH"));
399 		break;
400 
401 	case DDI_RESUME:
402 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_RESUME"));
403 		hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
404 		if (hxgep == NULL) {
405 			status = DDI_FAILURE;
406 			break;
407 		}
408 		if (hxgep->dip != dip) {
409 			status = DDI_FAILURE;
410 			break;
411 		}
412 		if (hxgep->suspended == DDI_PM_SUSPEND) {
413 			status = ddi_dev_is_needed(hxgep->dip, 0, 1);
414 		} else {
415 			(void) hxge_resume(hxgep);
416 		}
417 		goto hxge_attach_exit;
418 
419 	case DDI_PM_RESUME:
420 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_RESUME"));
421 		hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
422 		if (hxgep == NULL) {
423 			status = DDI_FAILURE;
424 			break;
425 		}
426 		if (hxgep->dip != dip) {
427 			status = DDI_FAILURE;
428 			break;
429 		}
430 		(void) hxge_resume(hxgep);
431 		goto hxge_attach_exit;
432 
433 	default:
434 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing unknown"));
435 		status = DDI_FAILURE;
436 		goto hxge_attach_exit;
437 	}
438 
439 	if (ddi_soft_state_zalloc(hxge_list, instance) == DDI_FAILURE) {
440 		status = DDI_FAILURE;
441 		HXGE_ERROR_MSG((hxgep, DDI_CTL,
442 		    "ddi_soft_state_zalloc failed"));
443 		goto hxge_attach_exit;
444 	}
445 
446 	hxgep = ddi_get_soft_state(hxge_list, instance);
447 	if (hxgep == NULL) {
448 		status = HXGE_ERROR;
449 		HXGE_ERROR_MSG((hxgep, DDI_CTL,
450 		    "ddi_get_soft_state failed"));
451 		goto hxge_attach_fail2;
452 	}
453 
454 	hxgep->drv_state = 0;
455 	hxgep->dip = dip;
456 	hxgep->instance = instance;
457 	hxgep->p_dip = ddi_get_parent(dip);
458 	hxgep->hxge_debug_level = hxge_debug_level;
459 	hpi_debug_level = hxge_debug_level;
460 
461 	/*
462 	 * Initialize the MMAC structure.
463 	 */
464 	(void) hxge_pfc_num_macs_get(hxgep, &hxgep->mmac.total);
465 	hxgep->mmac.available = hxgep->mmac.total;
466 	for (i = 0; i < hxgep->mmac.total; i++) {
467 		hxgep->mmac.addrs[i].set = B_FALSE;
468 		hxgep->mmac.addrs[i].primary = B_FALSE;
469 	}
470 
471 	hxge_fm_init(hxgep, &hxge_dev_reg_acc_attr, &hxge_dev_desc_dma_acc_attr,
472 	    &hxge_rx_dma_attr);
473 
474 	status = hxge_map_regs(hxgep);
475 	if (status != HXGE_OK) {
476 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_map_regs failed"));
477 		goto hxge_attach_fail3;
478 	}
479 
480 	status = hxge_init_common_dev(hxgep);
481 	if (status != HXGE_OK) {
482 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
483 		    "hxge_init_common_dev failed"));
484 		goto hxge_attach_fail4;
485 	}
486 
487 	/*
488 	 * Set up the NDD parameters for this instance.
489 	 */
490 	hxge_init_param(hxgep);
491 
492 	/*
493 	 * Set up the register tracing buffer.
494 	 */
495 	hpi_rtrace_buf_init((rtrace_t *)&hpi_rtracebuf);
496 
497 	/* init stats ptr */
498 	hxge_init_statsp(hxgep);
499 
500 	status = hxge_setup_mutexes(hxgep);
501 	if (status != HXGE_OK) {
502 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set mutex failed"));
503 		goto hxge_attach_fail;
504 	}
505 
506 	/* Scrub the MSI-X memory */
507 	hxge_msix_init(hxgep);
508 
509 	status = hxge_get_config_properties(hxgep);
510 	if (status != HXGE_OK) {
511 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "get_hw create failed"));
512 		goto hxge_attach_fail;
513 	}
514 
515 	/*
516 	 * Set up the kstats for the driver.
517 	 */
518 	hxge_setup_kstats(hxgep);
519 	hxge_setup_param(hxgep);
520 
521 	status = hxge_setup_system_dma_pages(hxgep);
522 	if (status != HXGE_OK) {
523 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "set dma page failed"));
524 		goto hxge_attach_fail;
525 	}
526 
527 	hxge_hw_id_init(hxgep);
528 	hxge_hw_init_niu_common(hxgep);
529 
530 	status = hxge_setup_dev(hxgep);
531 	if (status != DDI_SUCCESS) {
532 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set dev failed"));
533 		goto hxge_attach_fail;
534 	}
535 
536 	status = hxge_add_intrs(hxgep);
537 	if (status != DDI_SUCCESS) {
538 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "add_intr failed"));
539 		goto hxge_attach_fail;
540 	}
541 
542 	/*
543 	 * Enable interrupts.
544 	 */
545 	hxge_intrs_enable(hxgep);
546 
547 	/* Keep a copy of the MSI-X table as written */
548 	hxge_store_msix_table(hxgep);
549 
550 	if ((status = hxge_mac_register(hxgep)) != HXGE_OK) {
551 		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
552 		    "unable to register to mac layer (%d)", status));
553 		goto hxge_attach_fail;
554 	}
555 	mac_link_update(hxgep->mach, LINK_STATE_UNKNOWN);
556 
557 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "registered to mac (instance %d)",
558 	    instance));
559 
560 	goto hxge_attach_exit;
561 
562 hxge_attach_fail:
563 	hxge_unattach(hxgep);
564 	goto hxge_attach_fail1;
565 
566 hxge_attach_fail5:
567 	/*
568 	 * Tear down the ndd parameters setup.
569 	 */
570 	hxge_destroy_param(hxgep);
571 
572 	/*
573 	 * Tear down the kstat setup.
574 	 */
575 	hxge_destroy_kstats(hxgep);
576 
577 hxge_attach_fail4:
578 	if (hxgep->hxge_hw_p) {
579 		hxge_uninit_common_dev(hxgep);
580 		hxgep->hxge_hw_p = NULL;
581 	}
582 hxge_attach_fail3:
583 	/*
584 	 * Unmap the register setup.
585 	 */
586 	hxge_unmap_regs(hxgep);
587 
588 	hxge_fm_fini(hxgep);
589 
590 hxge_attach_fail2:
591 	ddi_soft_state_free(hxge_list, hxgep->instance);
592 
593 hxge_attach_fail1:
594 	if (status != HXGE_OK)
595 		status = (HXGE_ERROR | HXGE_DDI_FAILED);
596 	hxgep = NULL;
597 
598 hxge_attach_exit:
599 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_attach status = 0x%08x",
600 	    status));
601 
602 	return (status);
603 }
604 
605 static int
606 hxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
607 {
608 	int		status = DDI_SUCCESS;
609 	int		instance;
610 	p_hxge_t	hxgep = NULL;
611 
612 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_detach"));
613 	instance = ddi_get_instance(dip);
614 	hxgep = ddi_get_soft_state(hxge_list, instance);
615 	if (hxgep == NULL) {
616 		status = DDI_FAILURE;
617 		goto hxge_detach_exit;
618 	}
619 
620 	switch (cmd) {
621 	case DDI_DETACH:
622 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_DETACH"));
623 		break;
624 
625 	case DDI_PM_SUSPEND:
626 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
627 		hxgep->suspended = DDI_PM_SUSPEND;
628 		hxge_suspend(hxgep);
629 		break;
630 
631 	case DDI_SUSPEND:
632 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_SUSPEND"));
633 		if (hxgep->suspended != DDI_PM_SUSPEND) {
634 			hxgep->suspended = DDI_SUSPEND;
635 			hxge_suspend(hxgep);
636 		}
637 		break;
638 
639 	default:
640 		status = DDI_FAILURE;
641 		break;
642 	}
643 
644 	if (cmd != DDI_DETACH)
645 		goto hxge_detach_exit;
646 
647 	/*
648 	 * Stop the xcvr polling.
649 	 */
650 	hxgep->suspended = cmd;
651 
652 	if (hxgep->mach && (status = mac_unregister(hxgep->mach)) != 0) {
653 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
654 		    "<== hxge_detach status = 0x%08X", status));
655 		return (DDI_FAILURE);
656 	}
657 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
658 	    "<== hxge_detach (mac_unregister) status = 0x%08X", status));
659 
660 	hxge_unattach(hxgep);
661 	hxgep = NULL;
662 
663 hxge_detach_exit:
664 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_detach status = 0x%08X",
665 	    status));
666 
667 	return (status);
668 }
669 
670 static void
671 hxge_unattach(p_hxge_t hxgep)
672 {
673 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unattach"));
674 
675 	if (hxgep == NULL || hxgep->dev_regs == NULL) {
676 		return;
677 	}
678 
679 	if (hxgep->hxge_hw_p) {
680 		hxge_uninit_common_dev(hxgep);
681 		hxgep->hxge_hw_p = NULL;
682 	}
683 
684 	if (hxgep->hxge_timerid) {
685 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
686 		hxgep->hxge_timerid = 0;
687 	}
688 
689 	/* Stop any further interrupts. */
690 	hxge_remove_intrs(hxgep);
691 
692 	/* Stop the device and free resources. */
693 	hxge_destroy_dev(hxgep);
694 
695 	/* Tear down the ndd parameters setup. */
696 	hxge_destroy_param(hxgep);
697 
698 	/* Tear down the kstat setup. */
699 	hxge_destroy_kstats(hxgep);
700 
701 	/*
702 	 * Remove the list of NDD parameters which were set up during attach.
703 	 */
704 	if (hxgep->dip) {
705 		HXGE_DEBUG_MSG((hxgep, OBP_CTL,
706 		    " hxge_unattach: remove all properties"));
707 		(void) ddi_prop_remove_all(hxgep->dip);
708 	}
709 
710 	/*
711 	 * Reset RDC, TDC, PFC, and VMAC blocks from PEU to clear any
712 	 * previous state before unmapping the registers.
713 	 */
714 	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, 0x0000001E);
715 	HXGE_DELAY(1000);
716 
717 	/*
718 	 * Unmap the register setup.
719 	 */
720 	hxge_unmap_regs(hxgep);
721 
722 	hxge_fm_fini(hxgep);
723 
724 	/* Destroy all mutexes.  */
725 	hxge_destroy_mutexes(hxgep);
726 
727 	/*
728 	 * Free the soft state data structures allocated with this instance.
729 	 */
730 	ddi_soft_state_free(hxge_list, hxgep->instance);
731 
732 	HXGE_DEBUG_MSG((NULL, DDI_CTL, "<== hxge_unattach"));
733 }
734 
735 static hxge_status_t
736 hxge_map_regs(p_hxge_t hxgep)
737 {
738 	int		ddi_status = DDI_SUCCESS;
739 	p_dev_regs_t	dev_regs;
740 
741 #ifdef	HXGE_DEBUG
742 	char		*sysname;
743 #endif
744 
745 	off_t		regsize;
746 	hxge_status_t	status = HXGE_OK;
747 	int		nregs;
748 
749 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_map_regs"));
750 
751 	if (ddi_dev_nregs(hxgep->dip, &nregs) != DDI_SUCCESS)
752 		return (HXGE_ERROR);
753 
754 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_regs: nregs: %d", nregs));
755 
756 	hxgep->dev_regs = NULL;
757 	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
758 	dev_regs->hxge_regh = NULL;
759 	dev_regs->hxge_pciregh = NULL;
760 	dev_regs->hxge_msix_regh = NULL;
761 
762 	(void) ddi_dev_regsize(hxgep->dip, 0, &regsize);
763 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
764 	    "hxge_map_regs: pci config size 0x%x", regsize));
765 
766 	ddi_status = ddi_regs_map_setup(hxgep->dip, 0,
767 	    (caddr_t *)&(dev_regs->hxge_pciregp), 0, 0,
768 	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_pciregh);
769 	if (ddi_status != DDI_SUCCESS) {
770 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
771 		    "ddi_map_regs, hxge bus config regs failed"));
772 		goto hxge_map_regs_fail0;
773 	}
774 
775 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
776 	    "hxge_map_reg: PCI config addr 0x%0llx handle 0x%0llx",
777 	    dev_regs->hxge_pciregp,
778 	    dev_regs->hxge_pciregh));
779 
780 	(void) ddi_dev_regsize(hxgep->dip, 1, &regsize);
781 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
782 	    "hxge_map_regs: pio size 0x%x", regsize));
783 
784 	/* set up the device mapped register */
785 	ddi_status = ddi_regs_map_setup(hxgep->dip, 1,
786 	    (caddr_t *)&(dev_regs->hxge_regp), 0, 0,
787 	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_regh);
788 
789 	if (ddi_status != DDI_SUCCESS) {
790 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
791 		    "ddi_map_regs for Hydra global reg failed"));
792 		goto hxge_map_regs_fail1;
793 	}
794 
795 	/* set up the msi/msi-x mapped register */
796 	(void) ddi_dev_regsize(hxgep->dip, 2, &regsize);
797 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
798 	    "hxge_map_regs: msix size 0x%x", regsize));
799 
800 	ddi_status = ddi_regs_map_setup(hxgep->dip, 2,
801 	    (caddr_t *)&(dev_regs->hxge_msix_regp), 0, 0,
802 	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_msix_regh);
803 
804 	if (ddi_status != DDI_SUCCESS) {
805 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
806 		    "ddi_map_regs for msi reg failed"));
807 		goto hxge_map_regs_fail2;
808 	}
809 
810 	hxgep->dev_regs = dev_regs;
811 
812 	HPI_PCI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_pciregh);
813 	HPI_PCI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_pciregp);
814 	HPI_MSI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_msix_regh);
815 	HPI_MSI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_msix_regp);
816 
817 	HPI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
818 	HPI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
819 
820 	HPI_REG_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
821 	HPI_REG_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
822 
823 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_reg: hardware addr 0x%0llx "
824 	    " handle 0x%0llx", dev_regs->hxge_regp, dev_regs->hxge_regh));
825 
826 	goto hxge_map_regs_exit;
827 
828 hxge_map_regs_fail3:
829 	if (dev_regs->hxge_msix_regh) {
830 		ddi_regs_map_free(&dev_regs->hxge_msix_regh);
831 	}
832 
833 hxge_map_regs_fail2:
834 	if (dev_regs->hxge_regh) {
835 		ddi_regs_map_free(&dev_regs->hxge_regh);
836 	}
837 
838 hxge_map_regs_fail1:
839 	if (dev_regs->hxge_pciregh) {
840 		ddi_regs_map_free(&dev_regs->hxge_pciregh);
841 	}
842 
843 hxge_map_regs_fail0:
844 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "Freeing register set memory"));
845 	kmem_free(dev_regs, sizeof (dev_regs_t));
846 
847 hxge_map_regs_exit:
848 	if (ddi_status != DDI_SUCCESS)
849 		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
850 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_map_regs"));
851 	return (status);
852 }
853 
854 static void
855 hxge_unmap_regs(p_hxge_t hxgep)
856 {
857 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unmap_regs"));
858 	if (hxgep->dev_regs) {
859 		if (hxgep->dev_regs->hxge_pciregh) {
860 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
861 			    "==> hxge_unmap_regs: bus"));
862 			ddi_regs_map_free(&hxgep->dev_regs->hxge_pciregh);
863 			hxgep->dev_regs->hxge_pciregh = NULL;
864 		}
865 
866 		if (hxgep->dev_regs->hxge_regh) {
867 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
868 			    "==> hxge_unmap_regs: device registers"));
869 			ddi_regs_map_free(&hxgep->dev_regs->hxge_regh);
870 			hxgep->dev_regs->hxge_regh = NULL;
871 		}
872 
873 		if (hxgep->dev_regs->hxge_msix_regh) {
874 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
875 			    "==> hxge_unmap_regs: device interrupts"));
876 			ddi_regs_map_free(&hxgep->dev_regs->hxge_msix_regh);
877 			hxgep->dev_regs->hxge_msix_regh = NULL;
878 		}
879 		kmem_free(hxgep->dev_regs, sizeof (dev_regs_t));
880 		hxgep->dev_regs = NULL;
881 	}
882 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_unmap_regs"));
883 }
884 
885 static hxge_status_t
886 hxge_setup_mutexes(p_hxge_t hxgep)
887 {
888 	int		ddi_status = DDI_SUCCESS;
889 	hxge_status_t	status = HXGE_OK;
890 
891 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_mutexes"));
892 
893 	/*
894 	 * Get the interrupt cookie so the mutexes can be initialized.
895 	 */
896 	ddi_status = ddi_get_iblock_cookie(hxgep->dip, 0,
897 	    &hxgep->interrupt_cookie);
898 
899 	if (ddi_status != DDI_SUCCESS) {
900 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
901 		    "<== hxge_setup_mutexes: failed 0x%x", ddi_status));
902 		goto hxge_setup_mutexes_exit;
903 	}
904 
905 	/*
906 	 * Initialize mutexes for this device.
907 	 */
908 	MUTEX_INIT(hxgep->genlock, NULL,
909 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
910 	MUTEX_INIT(&hxgep->vmac_lock, NULL,
911 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
912 	MUTEX_INIT(&hxgep->ouraddr_lock, NULL,
913 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
914 	RW_INIT(&hxgep->filter_lock, NULL,
915 	    RW_DRIVER, (void *) hxgep->interrupt_cookie);
916 	MUTEX_INIT(&hxgep->pio_lock, NULL,
917 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
918 	MUTEX_INIT(&hxgep->timeout.lock, NULL,
919 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
920 
921 hxge_setup_mutexes_exit:
922 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
923 	    "<== hxge_setup_mutexes status = %x", status));
924 
925 	if (ddi_status != DDI_SUCCESS)
926 		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
927 
928 	return (status);
929 }
930 
931 static void
932 hxge_destroy_mutexes(p_hxge_t hxgep)
933 {
934 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_mutexes"));
935 	RW_DESTROY(&hxgep->filter_lock);
936 	MUTEX_DESTROY(&hxgep->vmac_lock);
937 	MUTEX_DESTROY(&hxgep->ouraddr_lock);
938 	MUTEX_DESTROY(hxgep->genlock);
939 	MUTEX_DESTROY(&hxgep->pio_lock);
940 	MUTEX_DESTROY(&hxgep->timeout.lock);
941 
942 	if (hxge_debug_init == 1) {
943 		MUTEX_DESTROY(&hxgedebuglock);
944 		hxge_debug_init = 0;
945 	}
946 
947 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_mutexes"));
948 }
949 
950 hxge_status_t
951 hxge_init(p_hxge_t hxgep)
952 {
953 	hxge_status_t status = HXGE_OK;
954 
955 	HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_init"));
956 
957 	if (hxgep->drv_state & STATE_HW_INITIALIZED) {
958 		return (status);
959 	}
960 
961 	/*
962 	 * Allocate system memory for the receive/transmit buffer blocks and
963 	 * receive/transmit descriptor rings.
964 	 */
965 	status = hxge_alloc_mem_pool(hxgep);
966 	if (status != HXGE_OK) {
967 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "alloc mem failed\n"));
968 		goto hxge_init_fail1;
969 	}
970 
971 	/*
972 	 * Initialize and enable TXDMA channels.
973 	 */
974 	status = hxge_init_txdma_channels(hxgep);
975 	if (status != HXGE_OK) {
976 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init txdma failed\n"));
977 		goto hxge_init_fail3;
978 	}
979 
980 	/*
981 	 * Initialize and enable RXDMA channels.
982 	 */
983 	status = hxge_init_rxdma_channels(hxgep);
984 	if (status != HXGE_OK) {
985 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init rxdma failed\n"));
986 		goto hxge_init_fail4;
987 	}
988 
989 	/*
990 	 * Initialize TCAM
991 	 */
992 	status = hxge_classify_init(hxgep);
993 	if (status != HXGE_OK) {
994 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init classify failed\n"));
995 		goto hxge_init_fail5;
996 	}
997 
998 	/*
999 	 * Initialize the VMAC block.
1000 	 */
1001 	status = hxge_vmac_init(hxgep);
1002 	if (status != HXGE_OK) {
1003 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init MAC failed\n"));
1004 		goto hxge_init_fail5;
1005 	}
1006 
1007 	/* Bring-up: this may be unnecessary once PXE and FCODE are available */
1008 	status = hxge_pfc_set_default_mac_addr(hxgep);
1009 	if (status != HXGE_OK) {
1010 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1011 		    "Default Address Failure\n"));
1012 		goto hxge_init_fail5;
1013 	}
1014 
1015 	hxge_intrs_enable(hxgep);
1016 
1017 	/* Keep a copy of the MSI-X table as written */
1018 	hxge_store_msix_table(hxgep);
1019 
1020 	/*
1021 	 * Enable hardware interrupts.
1022 	 */
1023 	hxge_intr_hw_enable(hxgep);
1024 	hxgep->drv_state |= STATE_HW_INITIALIZED;
1025 
1026 	goto hxge_init_exit;
1027 
1028 hxge_init_fail5:
1029 	hxge_uninit_rxdma_channels(hxgep);
1030 hxge_init_fail4:
1031 	hxge_uninit_txdma_channels(hxgep);
1032 hxge_init_fail3:
1033 	hxge_free_mem_pool(hxgep);
1034 hxge_init_fail1:
1035 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1036 	    "<== hxge_init status (failed) = 0x%08x", status));
1037 	return (status);
1038 
1039 hxge_init_exit:
1040 
1041 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_init status = 0x%08x",
1042 	    status));
1043 
1044 	return (status);
1045 }
1046 
1047 timeout_id_t
1048 hxge_start_timer(p_hxge_t hxgep, fptrv_t func, int msec)
1049 {
1050 	if ((hxgep->suspended == 0) || (hxgep->suspended == DDI_RESUME)) {
1051 		return (timeout(func, (caddr_t)hxgep,
1052 		    drv_usectohz(1000 * msec)));
1053 	}
1054 	return (NULL);
1055 }
1056 
1057 /*ARGSUSED*/
1058 void
1059 hxge_stop_timer(p_hxge_t hxgep, timeout_id_t timerid)
1060 {
1061 	if (timerid) {
1062 		(void) untimeout(timerid);
1063 	}
1064 }
1065 
1066 void
1067 hxge_uninit(p_hxge_t hxgep)
1068 {
1069 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_uninit"));
1070 
1071 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
1072 		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1073 		    "==> hxge_uninit: not initialized"));
1074 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
1075 		return;
1076 	}
1077 
1078 	/* Stop timer */
1079 	if (hxgep->hxge_timerid) {
1080 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
1081 		hxgep->hxge_timerid = 0;
1082 	}
1083 
1084 	(void) hxge_intr_hw_disable(hxgep);
1085 
1086 	/* Reset the receive VMAC side.  */
1087 	(void) hxge_rx_vmac_disable(hxgep);
1088 
1089 	/* Free classification resources */
1090 	(void) hxge_classify_uninit(hxgep);
1091 
1092 	/* Reset the transmit/receive DMA side.  */
1093 	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
1094 	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);
1095 
1096 	hxge_uninit_txdma_channels(hxgep);
1097 	hxge_uninit_rxdma_channels(hxgep);
1098 
1099 	/* Reset the transmit VMAC side.  */
1100 	(void) hxge_tx_vmac_disable(hxgep);
1101 
1102 	hxge_free_mem_pool(hxgep);
1103 
1104 	hxgep->drv_state &= ~STATE_HW_INITIALIZED;
1105 
1106 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
1107 }
1108 
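/*
 * hxge_debug_msg: format a driver message and emit it through cmn_err()
 * when the requested level is enabled in the debug mask, or when the
 * message is a NOTE or ERROR level message.
 */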
1109 /*ARGSUSED*/
1110 /*VARARGS*/
1111 void
1112 hxge_debug_msg(p_hxge_t hxgep, uint64_t level, char *fmt, ...)
1113 {
1114 	char		msg_buffer[1048];
1115 	char		prefix_buffer[32];
1116 	int		instance;
1117 	uint64_t	debug_level;
1118 	int		cmn_level = CE_CONT;
1119 	va_list		ap;
1120 
1121 	debug_level = (hxgep == NULL) ? hxge_debug_level :
1122 	    hxgep->hxge_debug_level;
1123 
1124 	if ((level & debug_level) || (level == HXGE_NOTE) ||
1125 	    (level == HXGE_ERR_CTL)) {
1126 		/* do the msg processing */
1127 		if (hxge_debug_init == 0) {
1128 			MUTEX_INIT(&hxgedebuglock, NULL, MUTEX_DRIVER, NULL);
1129 			hxge_debug_init = 1;
1130 		}
1131 
1132 		MUTEX_ENTER(&hxgedebuglock);
1133 
1134 		if ((level & HXGE_NOTE)) {
1135 			cmn_level = CE_NOTE;
1136 		}
1137 
1138 		if (level & HXGE_ERR_CTL) {
1139 			cmn_level = CE_WARN;
1140 		}
1141 
1142 		va_start(ap, fmt);
1143 		(void) vsprintf(msg_buffer, fmt, ap);
1144 		va_end(ap);
1145 
1146 		if (hxgep == NULL) {
1147 			instance = -1;
1148 			(void) sprintf(prefix_buffer, "%s :", "hxge");
1149 		} else {
1150 			instance = hxgep->instance;
1151 			(void) sprintf(prefix_buffer,
1152 			    "%s%d :", "hxge", instance);
1153 		}
1154 
1155 		MUTEX_EXIT(&hxgedebuglock);
1156 		cmn_err(cmn_level, "%s %s\n", prefix_buffer, msg_buffer);
1157 	}
1158 }
1159 
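/*
 * hxge_dump_packet: format up to MAX_DUMP_SZ bytes of a packet as
 * colon-separated hex digits into a static buffer (not reentrant).
 */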
1160 char *
1161 hxge_dump_packet(char *addr, int size)
1162 {
1163 	uchar_t		*ap = (uchar_t *)addr;
1164 	int		i;
1165 	static char	etherbuf[1024];
1166 	char		*cp = etherbuf;
1167 	char		digits[] = "0123456789abcdef";
1168 
1169 	if (!size)
1170 		size = 60;
1171 
1172 	if (size > MAX_DUMP_SZ) {
1173 		/* Dump the leading bytes */
1174 		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1175 			if (*ap > 0x0f)
1176 				*cp++ = digits[*ap >> 4];
1177 			*cp++ = digits[*ap++ & 0xf];
1178 			*cp++ = ':';
1179 		}
1180 		for (i = 0; i < 20; i++)
1181 			*cp++ = '.';
1182 		/* Dump the last MAX_DUMP_SZ/2 bytes */
1183 		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
1184 		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1185 			if (*ap > 0x0f)
1186 				*cp++ = digits[*ap >> 4];
1187 			*cp++ = digits[*ap++ & 0xf];
1188 			*cp++ = ':';
1189 		}
1190 	} else {
1191 		for (i = 0; i < size; i++) {
1192 			if (*ap > 0x0f)
1193 				*cp++ = digits[*ap >> 4];
1194 			*cp++ = digits[*ap++ & 0xf];
1195 			*cp++ = ':';
1196 		}
1197 	}
1198 	*--cp = 0;
1199 	return (etherbuf);
1200 }
1201 
1202 static void
1203 hxge_suspend(p_hxge_t hxgep)
1204 {
1205 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_suspend"));
1206 
1207 	/*
1208 	 * Stop the link status timer before hxge_intrs_disable() to avoid
1209 	 * accessing the MSI-X table simultaneously. Note that the timer
1210 	 * routine polls for MSI-X parity errors.
1211 	 */
1212 	MUTEX_ENTER(&hxgep->timeout.lock);
1213 	if (hxgep->timeout.id)
1214 		(void) untimeout(hxgep->timeout.id);
1215 	MUTEX_EXIT(&hxgep->timeout.lock);
1216 
1217 	hxge_intrs_disable(hxgep);
1218 	hxge_destroy_dev(hxgep);
1219 
1220 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_suspend"));
1221 }
1222 
1223 static hxge_status_t
1224 hxge_resume(p_hxge_t hxgep)
1225 {
1226 	hxge_status_t status = HXGE_OK;
1227 
1228 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_resume"));
1229 	hxgep->suspended = DDI_RESUME;
1230 
1231 	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_START);
1232 	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_START);
1233 
1234 	(void) hxge_rx_vmac_enable(hxgep);
1235 	(void) hxge_tx_vmac_enable(hxgep);
1236 
1237 	hxge_intrs_enable(hxgep);
1238 
1239 	/* Keep a copy of the MSI-X table as written */
1240 	hxge_store_msix_table(hxgep);
1241 
1242 	hxgep->suspended = 0;
1243 
1244 	/*
1245 	 * Resume the link status timer after hxge_intrs_enable to avoid
1246 	 * accessing the MSI-X table simultaneously.
1247 	 */
1248 	MUTEX_ENTER(&hxgep->timeout.lock);
1249 	hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
1250 	    hxgep->timeout.ticks);
1251 	MUTEX_EXIT(&hxgep->timeout.lock);
1252 
1253 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1254 	    "<== hxge_resume status = 0x%x", status));
1255 
1256 	return (status);
1257 }
1258 
1259 hxge_status_t
1260 hxge_setup_dev(p_hxge_t hxgep)
1261 {
1262 	hxge_status_t status = HXGE_OK;
1263 
1264 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_dev"));
1265 
1266 	status = hxge_link_init(hxgep);
1267 	if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) {
1268 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1269 		    "Bad register acc handle"));
1270 		status = HXGE_ERROR;
1271 	}
1272 
1273 	if (status != HXGE_OK) {
1274 		HXGE_DEBUG_MSG((hxgep, MAC_CTL,
1275 		    " hxge_setup_dev status (link init 0x%08x)", status));
1276 		goto hxge_setup_dev_exit;
1277 	}
1278 
1279 hxge_setup_dev_exit:
1280 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1281 	    "<== hxge_setup_dev status = 0x%08x", status));
1282 
1283 	return (status);
1284 }
1285 
1286 static void
1287 hxge_destroy_dev(p_hxge_t hxgep)
1288 {
1289 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_dev"));
1290 
1291 	(void) hxge_hw_stop(hxgep);
1292 
1293 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_dev"));
1294 }
1295 
1296 static hxge_status_t
1297 hxge_setup_system_dma_pages(p_hxge_t hxgep)
1298 {
1299 	int			ddi_status = DDI_SUCCESS;
1300 	uint_t			count;
1301 	ddi_dma_cookie_t	cookie;
1302 	uint_t			iommu_pagesize;
1303 	hxge_status_t		status = HXGE_OK;
1304 
1305 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_system_dma_pages"));
1306 
1307 	hxgep->sys_page_sz = ddi_ptob(hxgep->dip, (ulong_t)1);
1308 	iommu_pagesize = dvma_pagesize(hxgep->dip);
1309 
1310 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1311 	    " hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1312 	    " default_block_size %d iommu_pagesize %d",
1313 	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1314 	    hxgep->rx_default_block_size, iommu_pagesize));
1315 
1316 	if (iommu_pagesize != 0) {
1317 		if (hxgep->sys_page_sz == iommu_pagesize) {
1318 			/* Hydra supports pages of up to 8K */
1319 			if (iommu_pagesize > 0x2000)
1320 				hxgep->sys_page_sz = 0x2000;
1321 		} else {
1322 			if (hxgep->sys_page_sz > iommu_pagesize)
1323 				hxgep->sys_page_sz = iommu_pagesize;
1324 		}
1325 	}
1326 
1327 	hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1328 
1329 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1330 	    "==> hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1331 	    "default_block_size %d page mask %d",
1332 	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1333 	    hxgep->rx_default_block_size, hxgep->sys_page_mask));
1334 
1335 	switch (hxgep->sys_page_sz) {
1336 	default:
1337 		hxgep->sys_page_sz = 0x1000;
1338 		hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1339 		hxgep->rx_default_block_size = 0x1000;
1340 		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1341 		break;
1342 	case 0x1000:
1343 		hxgep->rx_default_block_size = 0x1000;
1344 		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1345 		break;
1346 	case 0x2000:
1347 		hxgep->rx_default_block_size = 0x2000;
1348 		hxgep->rx_bksize_code = RBR_BKSIZE_8K;
1349 		break;
1350 	}
1351 
1352 	hxge_rx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1353 	hxge_tx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1354 
1355 	/*
1356 	 * Get the system DMA burst size.
1357 	 */
1358 	ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr,
1359 	    DDI_DMA_DONTWAIT, 0, &hxgep->dmasparehandle);
1360 	if (ddi_status != DDI_SUCCESS) {
1361 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1362 		    "ddi_dma_alloc_handle: failed status 0x%x", ddi_status));
1363 		goto hxge_get_soft_properties_exit;
1364 	}
1365 
1366 	ddi_status = ddi_dma_addr_bind_handle(hxgep->dmasparehandle, NULL,
1367 	    (caddr_t)hxgep->dmasparehandle, sizeof (hxgep->dmasparehandle),
1368 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
1369 	    &cookie, &count);
1370 	if (ddi_status != DDI_DMA_MAPPED) {
1371 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1372 		    "Binding spare handle to find system burstsize failed."));
1373 		ddi_status = DDI_FAILURE;
1374 		goto hxge_get_soft_properties_fail1;
1375 	}
1376 
1377 	hxgep->sys_burst_sz = ddi_dma_burstsizes(hxgep->dmasparehandle);
1378 	(void) ddi_dma_unbind_handle(hxgep->dmasparehandle);
1379 
1380 hxge_get_soft_properties_fail1:
1381 	ddi_dma_free_handle(&hxgep->dmasparehandle);
1382 
1383 hxge_get_soft_properties_exit:
1384 
1385 	if (ddi_status != DDI_SUCCESS)
1386 		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
1387 
1388 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1389 	    "<== hxge_setup_system_dma_pages status = 0x%08x", status));
1390 
1391 	return (status);
1392 }
1393 
1394 hxge_status_t
1395 hxge_alloc_mem_pool(p_hxge_t hxgep)
1396 {
1397 	hxge_status_t status = HXGE_OK;
1398 
1399 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_alloc_mem_pool"));
1400 
1401 	status = hxge_alloc_rx_mem_pool(hxgep);
1402 	if (status != HXGE_OK) {
1403 		return (HXGE_ERROR);
1404 	}
1405 
1406 	status = hxge_alloc_tx_mem_pool(hxgep);
1407 	if (status != HXGE_OK) {
1408 		hxge_free_rx_mem_pool(hxgep);
1409 		return (HXGE_ERROR);
1410 	}
1411 
1412 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_alloc_mem_pool"));
1413 	return (HXGE_OK);
1414 }
1415 
1416 static void
1417 hxge_free_mem_pool(p_hxge_t hxgep)
1418 {
1419 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_mem_pool"));
1420 
1421 	hxge_free_rx_mem_pool(hxgep);
1422 	hxge_free_tx_mem_pool(hxgep);
1423 
1424 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_mem_pool"));
1425 }
1426 
1427 static hxge_status_t
1428 hxge_alloc_rx_mem_pool(p_hxge_t hxgep)
1429 {
1430 	int			i, j;
1431 	uint32_t		ndmas, st_rdc;
1432 	p_hxge_dma_pt_cfg_t	p_all_cfgp;
1433 	p_hxge_hw_pt_cfg_t	p_cfgp;
1434 	p_hxge_dma_pool_t	dma_poolp;
1435 	p_hxge_dma_common_t	*dma_buf_p;
1436 	p_hxge_dma_pool_t	dma_rbr_cntl_poolp;
1437 	p_hxge_dma_common_t	*dma_rbr_cntl_p;
1438 	p_hxge_dma_pool_t	dma_rcr_cntl_poolp;
1439 	p_hxge_dma_common_t	*dma_rcr_cntl_p;
1440 	p_hxge_dma_pool_t	dma_mbox_cntl_poolp;
1441 	p_hxge_dma_common_t	*dma_mbox_cntl_p;
1442 	size_t			rx_buf_alloc_size;
1443 	size_t			rx_rbr_cntl_alloc_size;
1444 	size_t			rx_rcr_cntl_alloc_size;
1445 	size_t			rx_mbox_cntl_alloc_size;
1446 	uint32_t		*num_chunks;	/* per dma */
1447 	hxge_status_t		status = HXGE_OK;
1448 
1449 	uint32_t		hxge_port_rbr_size;
1450 	uint32_t		hxge_port_rbr_spare_size;
1451 	uint32_t		hxge_port_rcr_size;
1452 
1453 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_mem_pool"));
1454 
1455 	p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1456 	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1457 	st_rdc = p_cfgp->start_rdc;
1458 	ndmas = p_cfgp->max_rdcs;
1459 
1460 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1461 	    " hxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));
1462 
1463 	/*
1464 	 * Allocate memory for each receive DMA channel.
1465 	 */
1466 	dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1467 	    KM_SLEEP);
1468 	dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1469 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1470 
1471 	dma_rbr_cntl_poolp = (p_hxge_dma_pool_t)
1472 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1473 	dma_rbr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1474 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1475 	dma_rcr_cntl_poolp = (p_hxge_dma_pool_t)
1476 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1477 	dma_rcr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1478 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1479 	dma_mbox_cntl_poolp = (p_hxge_dma_pool_t)
1480 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1481 	dma_mbox_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1482 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1483 
1484 	num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
1485 	    KM_SLEEP);
1486 
1487 	/*
1488 	 * Assume that each DMA channel will be configured with the default block
1489 	 * size. RBR block counts are rounded up to a multiple of the batch count (16).
1490 	 */
1491 	hxge_port_rbr_size = p_all_cfgp->rbr_size;
1492 	hxge_port_rcr_size = p_all_cfgp->rcr_size;
1493 
1494 	if (!hxge_port_rbr_size) {
1495 		hxge_port_rbr_size = HXGE_RBR_RBB_DEFAULT;
1496 	}
1497 
1498 	if (hxge_port_rbr_size % HXGE_RXDMA_POST_BATCH) {
1499 		hxge_port_rbr_size = (HXGE_RXDMA_POST_BATCH *
1500 		    (hxge_port_rbr_size / HXGE_RXDMA_POST_BATCH + 1));
1501 	}
1502 
1503 	p_all_cfgp->rbr_size = hxge_port_rbr_size;
1504 	hxge_port_rbr_spare_size = hxge_rbr_spare_size;
1505 
1506 	if (hxge_port_rbr_spare_size % HXGE_RXDMA_POST_BATCH) {
1507 		hxge_port_rbr_spare_size = (HXGE_RXDMA_POST_BATCH *
1508 		    (hxge_port_rbr_spare_size / HXGE_RXDMA_POST_BATCH + 1));
1509 	}
1510 
1511 	rx_buf_alloc_size = (hxgep->rx_default_block_size *
1512 	    (hxge_port_rbr_size + hxge_port_rbr_spare_size));
1513 
1514 	/*
1515 	 * Addresses of receive block ring, receive completion ring and the
1516 	 * mailbox must be all cache-aligned (64 bytes).
1517 	 */
1518 	rx_rbr_cntl_alloc_size = hxge_port_rbr_size + hxge_port_rbr_spare_size;
1519 	rx_rbr_cntl_alloc_size *= sizeof (rx_desc_t);
1520 	rx_rcr_cntl_alloc_size = sizeof (rcr_entry_t) * hxge_port_rcr_size;
1521 	rx_mbox_cntl_alloc_size = sizeof (rxdma_mailbox_t);
1522 
1523 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_alloc_rx_mem_pool: "
1524 	    "hxge_port_rbr_size = %d hxge_port_rbr_spare_size = %d "
1525 	    "hxge_port_rcr_size = %d rx_rbr_cntl_alloc_size = %d",
1526 	    hxge_port_rbr_size, hxge_port_rbr_spare_size,
1527 	    hxge_port_rcr_size, rx_rbr_cntl_alloc_size));
1528 
1529 	hxgep->hxge_port_rbr_size = hxge_port_rbr_size;
1530 	hxgep->hxge_port_rcr_size = hxge_port_rcr_size;
1531 
1532 	/*
1533 	 * Allocate memory for receive buffers and descriptor rings. Replace
1534 	 * allocation functions with interface functions provided by the
1535 	 * partition manager when it is available.
1536 	 */
1537 	/*
1538 	 * Allocate memory for the receive buffer blocks.
1539 	 */
1540 	for (i = 0; i < ndmas; i++) {
1541 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1542 		    " hxge_alloc_rx_mem_pool to alloc mem: "
1543 		    " dma %d dma_buf_p %llx &dma_buf_p %llx",
1544 		    i, dma_buf_p[i], &dma_buf_p[i]));
1545 
1546 		num_chunks[i] = 0;
1547 
1548 		status = hxge_alloc_rx_buf_dma(hxgep, st_rdc, &dma_buf_p[i],
1549 		    rx_buf_alloc_size, hxgep->rx_default_block_size,
1550 		    &num_chunks[i]);
1551 		if (status != HXGE_OK) {
1552 			break;
1553 		}
1554 
1555 		st_rdc++;
1556 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1557 		    " hxge_alloc_rx_mem_pool DONE  alloc mem: "
1558 		    "dma %d dma_buf_p %llx &dma_buf_p %llx", i,
1559 		    dma_buf_p[i], &dma_buf_p[i]));
1560 	}
1561 
1562 	if (i < ndmas) {
1563 		goto hxge_alloc_rx_mem_fail1;
1564 	}
1565 
1566 	/*
1567 	 * Allocate memory for descriptor rings and mailbox.
1568 	 */
1569 	st_rdc = p_cfgp->start_rdc;
1570 	for (j = 0; j < ndmas; j++) {
1571 		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1572 		    &dma_rbr_cntl_p[j], &hxge_rx_rbr_desc_dma_attr,
1573 		    rx_rbr_cntl_alloc_size)) != HXGE_OK) {
1574 			break;
1575 		}
1576 
1577 		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1578 		    &dma_rcr_cntl_p[j], &hxge_rx_rcr_desc_dma_attr,
1579 		    rx_rcr_cntl_alloc_size)) != HXGE_OK) {
1580 			break;
1581 		}
1582 
1583 		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1584 		    &dma_mbox_cntl_p[j], &hxge_rx_mbox_dma_attr,
1585 		    rx_mbox_cntl_alloc_size)) != HXGE_OK) {
1586 			break;
1587 		}
1588 		st_rdc++;
1589 	}
1590 
1591 	if (j < ndmas) {
1592 		goto hxge_alloc_rx_mem_fail2;
1593 	}
1594 
1595 	dma_poolp->ndmas = ndmas;
1596 	dma_poolp->num_chunks = num_chunks;
1597 	dma_poolp->buf_allocated = B_TRUE;
1598 	hxgep->rx_buf_pool_p = dma_poolp;
1599 	dma_poolp->dma_buf_pool_p = dma_buf_p;
1600 
1601 	dma_rbr_cntl_poolp->ndmas = ndmas;
1602 	dma_rbr_cntl_poolp->buf_allocated = B_TRUE;
1603 	hxgep->rx_rbr_cntl_pool_p = dma_rbr_cntl_poolp;
1604 	dma_rbr_cntl_poolp->dma_buf_pool_p = dma_rbr_cntl_p;
1605 
1606 	dma_rcr_cntl_poolp->ndmas = ndmas;
1607 	dma_rcr_cntl_poolp->buf_allocated = B_TRUE;
1608 	hxgep->rx_rcr_cntl_pool_p = dma_rcr_cntl_poolp;
1609 	dma_rcr_cntl_poolp->dma_buf_pool_p = dma_rcr_cntl_p;
1610 
1611 	dma_mbox_cntl_poolp->ndmas = ndmas;
1612 	dma_mbox_cntl_poolp->buf_allocated = B_TRUE;
1613 	hxgep->rx_mbox_cntl_pool_p = dma_mbox_cntl_poolp;
1614 	dma_mbox_cntl_poolp->dma_buf_pool_p = dma_mbox_cntl_p;
1615 
1616 	goto hxge_alloc_rx_mem_pool_exit;
1617 
1618 hxge_alloc_rx_mem_fail2:
1619 	/* Free control buffers */
1620 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1621 	    "==> hxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
1622 	for (; j >= 0; j--) {
1623 		hxge_free_rx_cntl_dma(hxgep,
1624 		    (p_hxge_dma_common_t)dma_rbr_cntl_p[j]);
1625 		hxge_free_rx_cntl_dma(hxgep,
1626 		    (p_hxge_dma_common_t)dma_rcr_cntl_p[j]);
1627 		hxge_free_rx_cntl_dma(hxgep,
1628 		    (p_hxge_dma_common_t)dma_mbox_cntl_p[j]);
1629 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1630 		    "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1631 	}
1632 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1633 	    "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1634 
1635 hxge_alloc_rx_mem_fail1:
1636 	/* Free data buffers */
1637 	i--;
1638 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1639 	    "==> hxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
1640 	for (; i >= 0; i--) {
1641 		hxge_free_rx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
1642 		    num_chunks[i]);
1643 	}
1644 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1645 	    "==> hxge_alloc_rx_mem_pool: data bufs freed (%d)", i));
1646 
1647 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1648 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1649 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1650 	KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
1651 	KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1652 	KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
1653 	KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1654 	KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
1655 	KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1656 
1657 hxge_alloc_rx_mem_pool_exit:
1658 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1659 	    "<== hxge_alloc_rx_mem_pool:status 0x%08x", status));
1660 
1661 	return (status);
1662 }
1663 
1664 static void
1665 hxge_free_rx_mem_pool(p_hxge_t hxgep)
1666 {
1667 	uint32_t		i, ndmas;
1668 	p_hxge_dma_pool_t	dma_poolp;
1669 	p_hxge_dma_common_t	*dma_buf_p;
1670 	p_hxge_dma_pool_t	dma_rbr_cntl_poolp;
1671 	p_hxge_dma_common_t	*dma_rbr_cntl_p;
1672 	p_hxge_dma_pool_t	dma_rcr_cntl_poolp;
1673 	p_hxge_dma_common_t	*dma_rcr_cntl_p;
1674 	p_hxge_dma_pool_t	dma_mbox_cntl_poolp;
1675 	p_hxge_dma_common_t	*dma_mbox_cntl_p;
1676 	uint32_t		*num_chunks;
1677 
1678 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_free_rx_mem_pool"));
1679 
1680 	dma_poolp = hxgep->rx_buf_pool_p;
1681 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
1682 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool "
1683 		    "(null rx buf pool or buf not allocated)"));
1684 		return;
1685 	}
1686 
1687 	dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p;
1688 	if (dma_rbr_cntl_poolp == NULL ||
1689 	    (!dma_rbr_cntl_poolp->buf_allocated)) {
1690 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1691 		    "<== hxge_free_rx_mem_pool "
1692 		    "(null rbr cntl buf pool or rbr cntl buf not allocated)"));
1693 		return;
1694 	}
1695 
1696 	dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p;
1697 	if (dma_rcr_cntl_poolp == NULL ||
1698 	    (!dma_rcr_cntl_poolp->buf_allocated)) {
1699 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1700 		    "<== hxge_free_rx_mem_pool "
1701 		    "(null rcr cntl buf pool or rcr cntl buf not allocated)"));
1702 		return;
1703 	}
1704 
1705 	dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p;
1706 	if (dma_mbox_cntl_poolp == NULL ||
1707 	    (!dma_mbox_cntl_poolp->buf_allocated)) {
1708 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1709 		    "<== hxge_free_rx_mem_pool "
1710 		    "(null mbox cntl buf pool or mbox cntl buf not allocated)"));
1711 		return;
1712 	}
1713 
1714 	dma_buf_p = dma_poolp->dma_buf_pool_p;
1715 	num_chunks = dma_poolp->num_chunks;
1716 
1717 	dma_rbr_cntl_p = dma_rbr_cntl_poolp->dma_buf_pool_p;
1718 	dma_rcr_cntl_p = dma_rcr_cntl_poolp->dma_buf_pool_p;
1719 	dma_mbox_cntl_p = dma_mbox_cntl_poolp->dma_buf_pool_p;
1720 	ndmas = dma_rbr_cntl_poolp->ndmas;
1721 
1722 	for (i = 0; i < ndmas; i++) {
1723 		hxge_free_rx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
1724 	}
1725 
1726 	for (i = 0; i < ndmas; i++) {
1727 		hxge_free_rx_cntl_dma(hxgep, dma_rbr_cntl_p[i]);
1728 		hxge_free_rx_cntl_dma(hxgep, dma_rcr_cntl_p[i]);
1729 		hxge_free_rx_cntl_dma(hxgep, dma_mbox_cntl_p[i]);
1730 	}
1731 
1732 	for (i = 0; i < ndmas; i++) {
1733 		KMEM_FREE(dma_buf_p[i],
1734 		    sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1735 		KMEM_FREE(dma_rbr_cntl_p[i], sizeof (hxge_dma_common_t));
1736 		KMEM_FREE(dma_rcr_cntl_p[i], sizeof (hxge_dma_common_t));
1737 		KMEM_FREE(dma_mbox_cntl_p[i], sizeof (hxge_dma_common_t));
1738 	}
1739 
1740 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1741 	KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1742 	KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
1743 	KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1744 	KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
1745 	KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1746 	KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
1747 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1748 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1749 
1750 	hxgep->rx_buf_pool_p = NULL;
1751 	hxgep->rx_rbr_cntl_pool_p = NULL;
1752 	hxgep->rx_rcr_cntl_pool_p = NULL;
1753 	hxgep->rx_mbox_cntl_pool_p = NULL;
1754 
1755 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool"));
1756 }
1757 
1758 static hxge_status_t
1759 hxge_alloc_rx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
1760     p_hxge_dma_common_t *dmap,
1761     size_t alloc_size, size_t block_size, uint32_t *num_chunks)
1762 {
1763 	p_hxge_dma_common_t	rx_dmap;
1764 	hxge_status_t		status = HXGE_OK;
1765 	size_t			total_alloc_size;
1766 	size_t			allocated = 0;
1767 	int			i, size_index, array_size;
1768 
1769 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_buf_dma"));
1770 
1771 	rx_dmap = (p_hxge_dma_common_t)
1772 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
1773 
1774 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1775 	    " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
1776 	    dma_channel, alloc_size, block_size, dmap));
1777 
1778 	total_alloc_size = alloc_size;
1779 
1780 	i = 0;
1781 	size_index = 0;
1782 	array_size = sizeof (alloc_sizes) / sizeof (size_t);
1783 	while ((size_index < array_size) &&
1784 	    (alloc_sizes[size_index] < alloc_size))
1785 		size_index++;
1786 	if (size_index >= array_size) {
1787 		size_index = array_size - 1;
1788 	}
1789 
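	/*
	 * Allocate in chunks: start with the smallest entry in alloc_sizes
	 * that covers the request and, on an allocation failure, fall back
	 * to the next smaller size until the total is satisfied or
	 * HXGE_DMA_BLOCK chunks have been used.
	 */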
1790 	while ((allocated < total_alloc_size) &&
1791 	    (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
1792 		rx_dmap[i].dma_chunk_index = i;
1793 		rx_dmap[i].block_size = block_size;
1794 		rx_dmap[i].alength = alloc_sizes[size_index];
1795 		rx_dmap[i].orig_alength = rx_dmap[i].alength;
1796 		rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
1797 		rx_dmap[i].dma_channel = dma_channel;
1798 		rx_dmap[i].contig_alloc_type = B_FALSE;
1799 
1800 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1801 		    "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
1802 		    "i %d nblocks %d alength %d",
1803 		    dma_channel, i, &rx_dmap[i], block_size,
1804 		    i, rx_dmap[i].nblocks, rx_dmap[i].alength));
1805 		status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1806 		    &hxge_rx_dma_attr, rx_dmap[i].alength,
1807 		    &hxge_dev_buf_dma_acc_attr,
1808 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1809 		    (p_hxge_dma_common_t)(&rx_dmap[i]));
1810 		if (status != HXGE_OK) {
1811 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1812 			    " hxge_alloc_rx_buf_dma: Alloc Failed: "
1813 			    " for size: %d", alloc_sizes[size_index]));
1814 			size_index--;
1815 		} else {
1816 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1817 			    " alloc_rx_buf_dma allocated rdc %d "
1818 			    "chunk %d size %x dvma %x bufp %llx ",
1819 			    dma_channel, i, rx_dmap[i].alength,
1820 			    rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
1821 			i++;
1822 			allocated += alloc_sizes[size_index];
1823 		}
1824 	}
1825 
1826 	if (allocated < total_alloc_size) {
1827 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1828 		    " hxge_alloc_rx_buf_dma failed due to"
1829 		    " allocated(%d) < required(%d)",
1830 		    allocated, total_alloc_size));
1831 		goto hxge_alloc_rx_mem_fail1;
1832 	}
1833 
1834 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1835 	    " alloc_rx_buf_dma rdc %d allocated %d chunks", dma_channel, i));
1836 
1837 	*num_chunks = i;
1838 	*dmap = rx_dmap;
1839 
1840 	goto hxge_alloc_rx_mem_exit;
1841 
1842 hxge_alloc_rx_mem_fail1:
1843 	KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1844 
1845 hxge_alloc_rx_mem_exit:
1846 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1847 	    "<== hxge_alloc_rx_buf_dma status 0x%08x", status));
1848 
1849 	return (status);
1850 }
1851 
1852 /*ARGSUSED*/
1853 static void
1854 hxge_free_rx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
1855     uint32_t num_chunks)
1856 {
1857 	int i;
1858 
1859 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1860 	    "==> hxge_free_rx_buf_dma: # of chunks %d", num_chunks));
1861 
1862 	for (i = 0; i < num_chunks; i++) {
1863 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1864 		    "==> hxge_free_rx_buf_dma: chunk %d dmap 0x%llx", i, dmap));
1865 		hxge_dma_mem_free(dmap++);
1866 	}
1867 
1868 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_buf_dma"));
1869 }
1870 
1871 /*ARGSUSED*/
1872 static hxge_status_t
1873 hxge_alloc_rx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
1874     p_hxge_dma_common_t *dmap, struct ddi_dma_attr *attr, size_t size)
1875 {
1876 	p_hxge_dma_common_t	rx_dmap;
1877 	hxge_status_t		status = HXGE_OK;
1878 
1879 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_cntl_dma"));
1880 
1881 	rx_dmap = (p_hxge_dma_common_t)
1882 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t), KM_SLEEP);
1883 
1884 	rx_dmap->contig_alloc_type = B_FALSE;
1885 
1886 	status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1887 	    attr, size, &hxge_dev_desc_dma_acc_attr,
1888 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, rx_dmap);
1889 	if (status != HXGE_OK) {
1890 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1891 		    " hxge_alloc_rx_cntl_dma: Alloc Failed: "
1892 		    " for size: %d", size));
1893 		goto hxge_alloc_rx_cntl_dma_fail1;
1894 	}
1895 
1896 	*dmap = rx_dmap;
1897 
1898 	goto hxge_alloc_rx_cntl_dma_exit;
1899 
1900 hxge_alloc_rx_cntl_dma_fail1:
1901 	KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t));
1902 
1903 hxge_alloc_rx_cntl_dma_exit:
1904 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1905 	    "<== hxge_alloc_rx_cntl_dma status 0x%08x", status));
1906 
1907 	return (status);
1908 }
1909 
1910 /*ARGSUSED*/
1911 static void
1912 hxge_free_rx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
1913 {
1914 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_rx_cntl_dma"));
1915 
1916 	hxge_dma_mem_free(dmap);
1917 
1918 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_rx_cntl_dma"));
1919 }
1920 
1921 static hxge_status_t
1922 hxge_alloc_tx_mem_pool(p_hxge_t hxgep)
1923 {
1924 	hxge_status_t		status = HXGE_OK;
1925 	int			i, j;
1926 	uint32_t		ndmas, st_tdc;
1927 	p_hxge_dma_pt_cfg_t	p_all_cfgp;
1928 	p_hxge_hw_pt_cfg_t	p_cfgp;
1929 	p_hxge_dma_pool_t	dma_poolp;
1930 	p_hxge_dma_common_t	*dma_buf_p;
1931 	p_hxge_dma_pool_t	dma_cntl_poolp;
1932 	p_hxge_dma_common_t	*dma_cntl_p;
1933 	size_t			tx_buf_alloc_size;
1934 	size_t			tx_cntl_alloc_size;
1935 	uint32_t		*num_chunks;	/* per dma */
1936 
1937 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool"));
1938 
1939 	p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1940 	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1941 	st_tdc = p_cfgp->start_tdc;
1942 	ndmas = p_cfgp->max_tdcs;
1943 
1944 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool: "
1945 	    "p_cfgp 0x%016llx start_tdc %d ndmas %d hxgep->max_tdcs %d",
1946 	    p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, hxgep->max_tdcs));
1947 	/*
1948 	 * Allocate memory for each transmit DMA channel.
1949 	 */
1950 	dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1951 	    KM_SLEEP);
1952 	dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1953 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1954 
1955 	dma_cntl_poolp = (p_hxge_dma_pool_t)
1956 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1957 	dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1958 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1959 
1960 	hxgep->hxge_port_tx_ring_size = hxge_tx_ring_size;
1961 
1962 	/*
1963 	 * Assume that each DMA channel will be configured with default
1964 	 * transmit buffer size for copying transmit data. (For packet payload
1965 	 * over this limit, packets will not be copied.)
1966 	 */
1967 	tx_buf_alloc_size = (hxge_bcopy_thresh * hxge_tx_ring_size);
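	/* i.e. one hxge_bcopy_thresh-sized staging buffer per ring entry. */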
1968 
1969 	/*
1970 	 * Addresses of transmit descriptor ring and the mailbox must be all
1971 	 * cache-aligned (64 bytes).
1972 	 */
1973 	tx_cntl_alloc_size = hxge_tx_ring_size;
1974 	tx_cntl_alloc_size *= (sizeof (tx_desc_t));
1975 	tx_cntl_alloc_size += sizeof (txdma_mailbox_t);
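	/* Per channel: hxge_tx_ring_size descriptors followed by one mailbox. */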
1976 
1977 	num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
1978 	    KM_SLEEP);
1979 
1980 	/*
1981 	 * Allocate memory for transmit buffers and descriptor rings. Replace
1982 	 * allocation functions with interface functions provided by the
1983 	 * partition manager when it is available.
1984 	 *
1985 	 * Allocate memory for the transmit buffer pool.
1986 	 */
1987 	for (i = 0; i < ndmas; i++) {
1988 		num_chunks[i] = 0;
1989 		status = hxge_alloc_tx_buf_dma(hxgep, st_tdc, &dma_buf_p[i],
1990 		    tx_buf_alloc_size, hxge_bcopy_thresh, &num_chunks[i]);
1991 		if (status != HXGE_OK) {
1992 			break;
1993 		}
1994 		st_tdc++;
1995 	}
1996 
1997 	if (i < ndmas) {
1998 		goto hxge_alloc_tx_mem_pool_fail1;
1999 	}
2000 
2001 	st_tdc = p_cfgp->start_tdc;
2002 
2003 	/*
2004 	 * Allocate memory for descriptor rings and mailbox.
2005 	 */
2006 	for (j = 0; j < ndmas; j++) {
2007 		status = hxge_alloc_tx_cntl_dma(hxgep, st_tdc, &dma_cntl_p[j],
2008 		    tx_cntl_alloc_size);
2009 		if (status != HXGE_OK) {
2010 			break;
2011 		}
2012 		st_tdc++;
2013 	}
2014 
2015 	if (j < ndmas) {
2016 		goto hxge_alloc_tx_mem_pool_fail2;
2017 	}
2018 
2019 	dma_poolp->ndmas = ndmas;
2020 	dma_poolp->num_chunks = num_chunks;
2021 	dma_poolp->buf_allocated = B_TRUE;
2022 	dma_poolp->dma_buf_pool_p = dma_buf_p;
2023 	hxgep->tx_buf_pool_p = dma_poolp;
2024 
2025 	dma_cntl_poolp->ndmas = ndmas;
2026 	dma_cntl_poolp->buf_allocated = B_TRUE;
2027 	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2028 	hxgep->tx_cntl_pool_p = dma_cntl_poolp;
2029 
2030 	HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2031 	    "==> hxge_alloc_tx_mem_pool: start_tdc %d "
2032 	    "ndmas %d poolp->ndmas %d", st_tdc, ndmas, dma_poolp->ndmas));
2033 
2034 	goto hxge_alloc_tx_mem_pool_exit;
2035 
2036 hxge_alloc_tx_mem_pool_fail2:
2037 	/* Free control buffers */
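	/* j indexes the channel that failed; unwind only channels 0 .. j-1. */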
2038 	j--;
2039 	for (; j >= 0; j--) {
2040 		hxge_free_tx_cntl_dma(hxgep,
2041 		    (p_hxge_dma_common_t)dma_cntl_p[j]);
2042 	}
2043 
2044 hxge_alloc_tx_mem_pool_fail1:
2045 	/* Free data buffers */
2046 	i--;
2047 	for (; i >= 0; i--) {
2048 		hxge_free_tx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
2049 		    num_chunks[i]);
2050 	}
2051 
2052 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2053 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2054 	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2055 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2056 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2057 
2058 hxge_alloc_tx_mem_pool_exit:
2059 	HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2060 	    "<== hxge_alloc_tx_mem_pool:status 0x%08x", status));
2061 
2062 	return (status);
2063 }
2064 
2065 static hxge_status_t
2066 hxge_alloc_tx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
2067     p_hxge_dma_common_t *dmap, size_t alloc_size,
2068     size_t block_size, uint32_t *num_chunks)
2069 {
2070 	p_hxge_dma_common_t	tx_dmap;
2071 	hxge_status_t		status = HXGE_OK;
2072 	size_t			total_alloc_size;
2073 	size_t			allocated = 0;
2074 	int			i, size_index, array_size;
2075 
2076 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_buf_dma"));
2077 
2078 	tx_dmap = (p_hxge_dma_common_t)
2079 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
2080 
2081 	total_alloc_size = alloc_size;
2082 	i = 0;
2083 	size_index = 0;
2084 	array_size = sizeof (alloc_sizes) / sizeof (size_t);
2085 	while ((size_index < array_size) &&
2086 	    (alloc_sizes[size_index] < alloc_size))
2087 		size_index++;
2088 	if (size_index >= array_size) {
2089 		size_index = array_size - 1;
2090 	}
2091 
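	/* Use the same chunk-size fallback strategy as hxge_alloc_rx_buf_dma. */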
2092 	while ((allocated < total_alloc_size) &&
2093 	    (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
2094 		tx_dmap[i].dma_chunk_index = i;
2095 		tx_dmap[i].block_size = block_size;
2096 		tx_dmap[i].alength = alloc_sizes[size_index];
2097 		tx_dmap[i].orig_alength = tx_dmap[i].alength;
2098 		tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2099 		tx_dmap[i].dma_channel = dma_channel;
2100 		tx_dmap[i].contig_alloc_type = B_FALSE;
2101 
2102 		status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2103 		    &hxge_tx_dma_attr, tx_dmap[i].alength,
2104 		    &hxge_dev_buf_dma_acc_attr,
2105 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
2106 		    (p_hxge_dma_common_t)(&tx_dmap[i]));
2107 		if (status != HXGE_OK) {
2108 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2109 			    " hxge_alloc_tx_buf_dma: Alloc Failed: "
2110 			    " for size: %d", alloc_sizes[size_index]));
2111 			size_index--;
2112 		} else {
2113 			i++;
2114 			allocated += alloc_sizes[size_index];
2115 		}
2116 	}
2117 
2118 	if (allocated < total_alloc_size) {
2119 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2120 		    " hxge_alloc_tx_buf_dma: failed due to"
2121 		    " allocated(%d) < required(%d)",
2122 		    allocated, total_alloc_size));
2123 		goto hxge_alloc_tx_mem_fail1;
2124 	}
2125 
2126 	*num_chunks = i;
2127 	*dmap = tx_dmap;
2128 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2129 	    "==> hxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
2130 	    *dmap, i));
2131 	goto hxge_alloc_tx_mem_exit;
2132 
2133 hxge_alloc_tx_mem_fail1:
2134 	KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2135 
2136 hxge_alloc_tx_mem_exit:
2137 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2138 	    "<== hxge_alloc_tx_buf_dma status 0x%08x", status));
2139 
2140 	return (status);
2141 }
2142 
2143 /*ARGSUSED*/
2144 static void
2145 hxge_free_tx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
2146     uint32_t num_chunks)
2147 {
2148 	int i;
2149 
2150 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_tx_buf_dma"));
2151 
2152 	for (i = 0; i < num_chunks; i++) {
2153 		hxge_dma_mem_free(dmap++);
2154 	}
2155 
2156 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_tx_buf_dma"));
2157 }
2158 
2159 /*ARGSUSED*/
2160 static hxge_status_t
2161 hxge_alloc_tx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
2162     p_hxge_dma_common_t *dmap, size_t size)
2163 {
2164 	p_hxge_dma_common_t	tx_dmap;
2165 	hxge_status_t		status = HXGE_OK;
2166 
2167 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_cntl_dma"));
2168 
2169 	tx_dmap = (p_hxge_dma_common_t)KMEM_ZALLOC(sizeof (hxge_dma_common_t),
2170 	    KM_SLEEP);
2171 
2172 	tx_dmap->contig_alloc_type = B_FALSE;
2173 
2174 	status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2175 	    &hxge_tx_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr,
2176 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, tx_dmap);
2177 	if (status != HXGE_OK) {
2178 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2179 		    " hxge_alloc_tx_cntl_dma: Alloc Failed: "
2180 		    " for size: %d", size));
2181 		goto hxge_alloc_tx_cntl_dma_fail1;
2182 	}
2183 
2184 	*dmap = tx_dmap;
2185 
2186 	goto hxge_alloc_tx_cntl_dma_exit;
2187 
2188 hxge_alloc_tx_cntl_dma_fail1:
2189 	KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t));
2190 
2191 hxge_alloc_tx_cntl_dma_exit:
2192 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2193 	    "<== hxge_alloc_tx_cntl_dma status 0x%08x", status));
2194 
2195 	return (status);
2196 }
2197 
2198 /*ARGSUSED*/
2199 static void
2200 hxge_free_tx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
2201 {
2202 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_tx_cntl_dma"));
2203 
2204 	hxge_dma_mem_free(dmap);
2205 
2206 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_tx_cntl_dma"));
2207 }
2208 
2209 static void
2210 hxge_free_tx_mem_pool(p_hxge_t hxgep)
2211 {
2212 	uint32_t		i, ndmas;
2213 	p_hxge_dma_pool_t	dma_poolp;
2214 	p_hxge_dma_common_t	*dma_buf_p;
2215 	p_hxge_dma_pool_t	dma_cntl_poolp;
2216 	p_hxge_dma_common_t	*dma_cntl_p;
2217 	uint32_t		*num_chunks;
2218 
2219 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_free_tx_mem_pool"));
2220 
2221 	dma_poolp = hxgep->tx_buf_pool_p;
2222 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
2223 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2224 		    "<== hxge_free_tx_mem_pool "
2225 		    "(null tx buf pool or buf not allocated)"));
2226 		return;
2227 	}
2228 
2229 	dma_cntl_poolp = hxgep->tx_cntl_pool_p;
2230 	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
2231 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2232 		    "<== hxge_free_tx_mem_pool "
2233 		    "(null tx cntl buf pool or cntl buf not allocated)"));
2234 		return;
2235 	}
2236 
2237 	dma_buf_p = dma_poolp->dma_buf_pool_p;
2238 	num_chunks = dma_poolp->num_chunks;
2239 
2240 	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
2241 	ndmas = dma_cntl_poolp->ndmas;
2242 
2243 	for (i = 0; i < ndmas; i++) {
2244 		hxge_free_tx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
2245 	}
2246 
2247 	for (i = 0; i < ndmas; i++) {
2248 		hxge_free_tx_cntl_dma(hxgep, dma_cntl_p[i]);
2249 	}
2250 
2251 	for (i = 0; i < ndmas; i++) {
2252 		KMEM_FREE(dma_buf_p[i],
2253 		    sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2254 		KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t));
2255 	}
2256 
2257 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2258 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2259 	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2260 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2261 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2262 
2263 	hxgep->tx_buf_pool_p = NULL;
2264 	hxgep->tx_cntl_pool_p = NULL;
2265 
2266 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_free_tx_mem_pool"));
2267 }
2268 
2269 /*ARGSUSED*/
2270 static hxge_status_t
2271 hxge_dma_mem_alloc(p_hxge_t hxgep, dma_method_t method,
2272     struct ddi_dma_attr *dma_attrp,
2273     size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
2274     p_hxge_dma_common_t dma_p)
2275 {
2276 	caddr_t		kaddrp;
2277 	int		ddi_status = DDI_SUCCESS;
2278 
2279 	dma_p->dma_handle = NULL;
2280 	dma_p->acc_handle = NULL;
2281 	dma_p->kaddrp = NULL;
2282 
2283 	ddi_status = ddi_dma_alloc_handle(hxgep->dip, dma_attrp,
2284 	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
2285 	if (ddi_status != DDI_SUCCESS) {
2286 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2287 		    "hxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
2288 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2289 	}
2290 
2291 	ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, acc_attr_p,
2292 	    xfer_flags, DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
2293 	    &dma_p->acc_handle);
2294 	if (ddi_status != DDI_SUCCESS) {
2295 		/* The caller will decide whether it is fatal */
2296 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2297 		    "hxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
2298 		ddi_dma_free_handle(&dma_p->dma_handle);
2299 		dma_p->dma_handle = NULL;
2300 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2301 	}
2302 
2303 	if (dma_p->alength < length) {
2304 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2305 		    "hxge_dma_mem_alloc:ddi_dma_mem_alloc < length."));
2306 		ddi_dma_mem_free(&dma_p->acc_handle);
2307 		ddi_dma_free_handle(&dma_p->dma_handle);
2308 		dma_p->acc_handle = NULL;
2309 		dma_p->dma_handle = NULL;
2310 		return (HXGE_ERROR);
2311 	}
2312 
2313 	ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2314 	    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2315 	    &dma_p->dma_cookie, &dma_p->ncookies);
2316 	if (ddi_status != DDI_DMA_MAPPED) {
2317 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2318 		    "hxge_dma_mem_alloc:ddi_dma_addr_bind failed "
2319 		    "(status 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2320 		if (dma_p->acc_handle) {
2321 			ddi_dma_mem_free(&dma_p->acc_handle);
2322 			dma_p->acc_handle = NULL;
2323 		}
2324 		ddi_dma_free_handle(&dma_p->dma_handle);
2325 		dma_p->dma_handle = NULL;
2326 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2327 	}
2328 
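	/* The buffer must map to a single cookie (physically contiguous). */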
2329 	if (dma_p->ncookies != 1) {
2330 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2331 		    "hxge_dma_mem_alloc:ddi_dma_addr_bind > 1 cookie"
2332 		    "(status 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2333 		if (dma_p->acc_handle) {
2334 			ddi_dma_mem_free(&dma_p->acc_handle);
2335 			dma_p->acc_handle = NULL;
2336 		}
2337 		(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2338 		ddi_dma_free_handle(&dma_p->dma_handle);
2339 		dma_p->dma_handle = NULL;
2340 		return (HXGE_ERROR);
2341 	}
2342 
2343 	dma_p->kaddrp = kaddrp;
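	/* On 32-bit x86, the 64-bit cookie address is narrowed before use. */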
2344 #if defined(__i386)
2345 	dma_p->ioaddr_pp =
2346 	    (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
2347 #else
2348 	dma_p->ioaddr_pp = (unsigned char *) dma_p->dma_cookie.dmac_laddress;
2349 #endif
2350 
2351 	HPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
2352 
2353 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_dma_mem_alloc: "
2354 	    "dma buffer allocated: dma_p $%p "
2355 	    "return dmac_laddress from cookie $%p dmac_size %d "
2356 	    "dma_p->ioaddr_p $%p "
2357 	    "dma_p->orig_ioaddr_p $%p "
2358 	    "orig_vatopa $%p "
2359 	    "alength %d (0x%x) "
2360 	    "kaddrp $%p "
2361 	    "length %d (0x%x)",
2362 	    dma_p,
2363 	    dma_p->dma_cookie.dmac_laddress,
2364 	    dma_p->dma_cookie.dmac_size,
2365 	    dma_p->ioaddr_pp,
2366 	    dma_p->orig_ioaddr_pp,
2367 	    dma_p->orig_vatopa,
2368 	    dma_p->alength, dma_p->alength,
2369 	    kaddrp,
2370 	    length, length));
2371 
2372 	return (HXGE_OK);
2373 }
2374 
2375 static void
2376 hxge_dma_mem_free(p_hxge_dma_common_t dma_p)
2377 {
2378 	if (dma_p == NULL)
2379 		return;
2380 
2381 	if (dma_p->dma_handle != NULL) {
2382 		if (dma_p->ncookies) {
2383 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2384 			dma_p->ncookies = 0;
2385 		}
2386 		ddi_dma_free_handle(&dma_p->dma_handle);
2387 		dma_p->dma_handle = NULL;
2388 	}
2389 
2390 	if (dma_p->acc_handle != NULL) {
2391 		ddi_dma_mem_free(&dma_p->acc_handle);
2392 		dma_p->acc_handle = NULL;
2393 		HPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
2394 	}
2395 
2396 	dma_p->kaddrp = NULL;
2397 	dma_p->alength = 0;
2398 }
2399 
2400 /*
2401  *	hxge_m_start() -- start transmitting and receiving.
2402  *
2403  *	This function is called by the MAC layer when the first
2404  *	stream is opened to prepare the hardware for transmitting
2405  *	and receiving packets.
2406  */
2407 static int
2408 hxge_m_start(void *arg)
2409 {
2410 	p_hxge_t hxgep = (p_hxge_t)arg;
2411 
2412 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_start"));
2413 
2414 	MUTEX_ENTER(hxgep->genlock);
2415 
2416 	if (hxge_init(hxgep) != DDI_SUCCESS) {
2417 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2418 		    "<== hxge_m_start: initialization failed"));
2419 		MUTEX_EXIT(hxgep->genlock);
2420 		return (EIO);
2421 	}
2422 
2423 	if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) {
2424 		/*
2425 		 * Start timer to check the system error and tx hangs
2426 		 */
2427 		hxgep->hxge_timerid = hxge_start_timer(hxgep,
2428 		    hxge_check_hw_state, HXGE_CHECK_TIMER);
2429 
2430 		hxgep->hxge_mac_state = HXGE_MAC_STARTED;
2431 
2432 		hxgep->timeout.link_status = 0;
2433 		hxgep->timeout.report_link_status = B_TRUE;
2434 		hxgep->timeout.ticks = drv_usectohz(2 * 1000000);
2435 
2436 		/* Start the link status timer to check the link status */
2437 		MUTEX_ENTER(&hxgep->timeout.lock);
2438 		hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
2439 		    hxgep->timeout.ticks);
2440 		MUTEX_EXIT(&hxgep->timeout.lock);
2441 	}
2442 
2443 	MUTEX_EXIT(hxgep->genlock);
2444 
2445 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_start"));
2446 
2447 	return (0);
2448 }
2449 
2450 /*
2451  * hxge_m_stop(): stop transmitting and receiving.
2452  */
2453 static void
2454 hxge_m_stop(void *arg)
2455 {
2456 	p_hxge_t hxgep = (p_hxge_t)arg;
2457 
2458 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_stop"));
2459 
2460 	if (hxgep->hxge_timerid) {
2461 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
2462 		hxgep->hxge_timerid = 0;
2463 	}
2464 
2465 	/* Stop the link status timer before unregistering */
2466 	MUTEX_ENTER(&hxgep->timeout.lock);
2467 	if (hxgep->timeout.id) {
2468 		(void) untimeout(hxgep->timeout.id);
2469 		hxgep->timeout.id = 0;
2470 	}
2471 	hxge_link_update(hxgep, LINK_STATE_DOWN);
2472 	MUTEX_EXIT(&hxgep->timeout.lock);
2473 
2474 	MUTEX_ENTER(hxgep->genlock);
2475 
2476 	hxge_uninit(hxgep);
2477 
2478 	hxgep->hxge_mac_state = HXGE_MAC_STOPPED;
2479 
2480 	MUTEX_EXIT(hxgep->genlock);
2481 
2482 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_stop"));
2483 }
2484 
2485 static int
2486 hxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
2487 {
2488 	p_hxge_t		hxgep = (p_hxge_t)arg;
2489 	struct ether_addr	addrp;
2490 
2491 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_multicst: add %d", add));
2492 
2493 	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
2494 
2495 	if (add) {
2496 		if (hxge_add_mcast_addr(hxgep, &addrp)) {
2497 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2498 			    "<== hxge_m_multicst: add multicast failed"));
2499 			return (EINVAL);
2500 		}
2501 	} else {
2502 		if (hxge_del_mcast_addr(hxgep, &addrp)) {
2503 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2504 			    "<== hxge_m_multicst: del multicast failed"));
2505 			return (EINVAL);
2506 		}
2507 	}
2508 
2509 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_multicst"));
2510 
2511 	return (0);
2512 }
2513 
2514 static int
2515 hxge_m_promisc(void *arg, boolean_t on)
2516 {
2517 	p_hxge_t hxgep = (p_hxge_t)arg;
2518 
2519 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_promisc: on %d", on));
2520 
2521 	if (hxge_set_promisc(hxgep, on)) {
2522 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2523 		    "<== hxge_m_promisc: set promisc failed"));
2524 		return (EINVAL);
2525 	}
2526 
2527 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_promisc: on %d", on));
2528 
2529 	return (0);
2530 }
2531 
2532 static void
2533 hxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2534 {
2535 	p_hxge_t	hxgep = (p_hxge_t)arg;
2536 	struct iocblk	*iocp = (struct iocblk *)mp->b_rptr;
2537 	boolean_t	need_privilege;
2538 	int		err;
2539 	int		cmd;
2540 
2541 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl"));
2542 
2543 	iocp = (struct iocblk *)mp->b_rptr;
2544 	iocp->ioc_error = 0;
2545 	need_privilege = B_TRUE;
2546 	cmd = iocp->ioc_cmd;
2547 
2548 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl: cmd 0x%08x", cmd));
2549 	switch (cmd) {
2550 	default:
2551 		miocnak(wq, mp, 0, EINVAL);
2552 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl: invalid"));
2553 		return;
2554 
2555 	case LB_GET_INFO_SIZE:
2556 	case LB_GET_INFO:
2557 	case LB_GET_MODE:
2558 		need_privilege = B_FALSE;
2559 		break;
2560 
2561 	case LB_SET_MODE:
2562 		break;
2563 
2564 	case ND_GET:
2565 		need_privilege = B_FALSE;
2566 		break;
2567 	case ND_SET:
2568 		break;
2569 
2570 	case HXGE_GET_TX_RING_SZ:
2571 	case HXGE_GET_TX_DESC:
2572 	case HXGE_TX_SIDE_RESET:
2573 	case HXGE_RX_SIDE_RESET:
2574 	case HXGE_GLOBAL_RESET:
2575 	case HXGE_RESET_MAC:
2576 	case HXGE_PUT_TCAM:
2577 	case HXGE_GET_TCAM:
2578 	case HXGE_RTRACE:
2579 
2580 		need_privilege = B_FALSE;
2581 		break;
2582 	}
2583 
2584 	if (need_privilege) {
2585 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
2586 		if (err != 0) {
2587 			miocnak(wq, mp, 0, err);
2588 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2589 			    "<== hxge_m_ioctl: no priv"));
2590 			return;
2591 		}
2592 	}
2593 
2594 	switch (cmd) {
2595 	case ND_GET:
2596 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_GET command"));
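		/* FALLTHROUGH */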
2597 	case ND_SET:
2598 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_SET command"));
2599 		hxge_param_ioctl(hxgep, wq, mp, iocp);
2600 		break;
2601 
2602 	case LB_GET_MODE:
2603 	case LB_SET_MODE:
2604 	case LB_GET_INFO_SIZE:
2605 	case LB_GET_INFO:
2606 		hxge_loopback_ioctl(hxgep, wq, mp, iocp);
2607 		break;
2608 
2609 	case HXGE_PUT_TCAM:
2610 	case HXGE_GET_TCAM:
2611 	case HXGE_GET_TX_RING_SZ:
2612 	case HXGE_GET_TX_DESC:
2613 	case HXGE_TX_SIDE_RESET:
2614 	case HXGE_RX_SIDE_RESET:
2615 	case HXGE_GLOBAL_RESET:
2616 	case HXGE_RESET_MAC:
2617 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL,
2618 		    "==> hxge_m_ioctl: cmd 0x%x", cmd));
2619 		hxge_hw_ioctl(hxgep, wq, mp, iocp);
2620 		break;
2621 	}
2622 
2623 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl"));
2624 }
2625 
2626 /*ARGSUSED*/
2627 static int
2628 hxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
2629 {
2630 	p_hxge_ring_handle_t	rhp = (p_hxge_ring_handle_t)rdriver;
2631 	p_hxge_t		hxgep;
2632 	p_tx_ring_t		ring;
2633 
2634 	ASSERT(rhp != NULL);
2635 	ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_TDCS));
2636 
2637 	hxgep = rhp->hxgep;
2638 
2639 	/*
2640 	 * Get the ring pointer.
2641 	 */
2642 	ring = hxgep->tx_rings->rings[rhp->index];
2643 
2644 	/*
2645 	 * Fill in the handle for the transmit.
2646 	 */
2647 	MUTEX_ENTER(&ring->lock);
2648 	rhp->started = B_TRUE;
2649 	ring->ring_handle = rhp->ring_handle;
2650 	MUTEX_EXIT(&ring->lock);
2651 
2652 	return (0);
2653 }
2654 
2655 static void
2656 hxge_tx_ring_stop(mac_ring_driver_t rdriver)
2657 {
2658 	p_hxge_ring_handle_t    rhp = (p_hxge_ring_handle_t)rdriver;
2659 	p_hxge_t		hxgep;
2660 	p_tx_ring_t		ring;
2661 
2662 	ASSERT(rhp != NULL);
2663 	ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_TDCS));
2664 
2665 	hxgep = rhp->hxgep;
2666 	ring = hxgep->tx_rings->rings[rhp->index];
2667 
2668 	MUTEX_ENTER(&ring->lock);
2669 	ring->ring_handle = (mac_ring_handle_t)NULL;
2670 	rhp->started = B_FALSE;
2671 	MUTEX_EXIT(&ring->lock);
2672 }
2673 
2674 static int
2675 hxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
2676 {
2677 	p_hxge_ring_handle_t	rhp = (p_hxge_ring_handle_t)rdriver;
2678 	p_hxge_t		hxgep;
2679 	p_rx_rcr_ring_t		ring;
2680 	int			i;
2681 
2682 	ASSERT(rhp != NULL);
2683 	ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_RDCS));
2684 
2685 	hxgep = rhp->hxgep;
2686 
2687 	/*
2688 	 * Get pointer to ring.
2689 	 */
2690 	ring = hxgep->rx_rcr_rings->rcr_rings[rhp->index];
2691 
2692 	MUTEX_ENTER(&ring->lock);
2693 
2694 	if (rhp->started) {
2695 		MUTEX_EXIT(&ring->lock);
2696 		return (0);
2697 	}
2698 
2699 	/*
2700 	 * Set the ldvp and ldgp pointers to enable/disable
2701 	 * polling.
2702 	 */
2703 	for (i = 0; i < hxgep->ldgvp->maxldvs; i++) {
2704 		if ((hxgep->ldgvp->ldvp[i].is_rxdma == 1) &&
2705 		    (hxgep->ldgvp->ldvp[i].channel == rhp->index)) {
2706 			ring->ldvp = &hxgep->ldgvp->ldvp[i];
2707 			ring->ldgp = hxgep->ldgvp->ldvp[i].ldgp;
2708 			break;
2709 		}
2710 	}
2711 
2712 	rhp->started = B_TRUE;
2713 	ring->rcr_mac_handle = rhp->ring_handle;
2714 	ring->rcr_gen_num = mr_gen_num;
2715 	MUTEX_EXIT(&ring->lock);
2716 
2717 	return (0);
2718 }
2719 
2720 static void
2721 hxge_rx_ring_stop(mac_ring_driver_t rdriver)
2722 {
2723 	p_hxge_ring_handle_t	rhp = (p_hxge_ring_handle_t)rdriver;
2724 	p_hxge_t		hxgep;
2725 	p_rx_rcr_ring_t		ring;
2726 
2727 	ASSERT(rhp != NULL);
2728 	ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_RDCS));
2729 
2730 	hxgep = rhp->hxgep;
2731 	ring =  hxgep->rx_rcr_rings->rcr_rings[rhp->index];
2732 
2733 	MUTEX_ENTER(&ring->lock);
2734 	rhp->started = B_FALSE;
2735 	ring->rcr_mac_handle = NULL;
2736 	ring->ldvp = NULL;
2737 	ring->ldgp = NULL;
2738 	MUTEX_EXIT(&ring->lock);
2739 }
2740 
2741 static int
2742 hxge_rx_group_start(mac_group_driver_t gdriver)
2743 {
2744 	hxge_ring_group_t	*group = (hxge_ring_group_t *)gdriver;
2745 
2746 	ASSERT(group->hxgep != NULL);
2747 	ASSERT(group->hxgep->hxge_mac_state == HXGE_MAC_STARTED);
2748 
2749 	MUTEX_ENTER(group->hxgep->genlock);
2750 	group->started = B_TRUE;
2751 	MUTEX_EXIT(group->hxgep->genlock);
2752 
2753 	return (0);
2754 }
2755 
2756 static void
2757 hxge_rx_group_stop(mac_group_driver_t gdriver)
2758 {
2759 	hxge_ring_group_t	*group = (hxge_ring_group_t *)gdriver;
2760 
2761 	ASSERT(group->hxgep != NULL);
2762 	ASSERT(group->hxgep->hxge_mac_state == HXGE_MAC_STARTED);
2763 	ASSERT(group->started == B_TRUE);
2764 
2765 	MUTEX_ENTER(group->hxgep->genlock);
2766 	group->started = B_FALSE;
2767 	MUTEX_EXIT(group->hxgep->genlock);
2768 }
2769 
2770 static int
2771 hxge_mmac_get_slot(p_hxge_t hxgep, int *slot)
2772 {
2773 	int	i;
2774 
2775 	/*
2776 	 * Find an open slot.
2777 	 */
2778 	for (i = 0; i < hxgep->mmac.total; i++) {
2779 		if (!hxgep->mmac.addrs[i].set) {
2780 			*slot = i;
2781 			return (0);
2782 		}
2783 	}
2784 
2785 	return (ENXIO);
2786 }
2787 
2788 static int
2789 hxge_mmac_set_addr(p_hxge_t hxgep, int slot, const uint8_t *addr)
2790 {
2791 	struct ether_addr	eaddr;
2792 	hxge_status_t		status = HXGE_OK;
2793 
2794 	bcopy(addr, (uint8_t *)&eaddr, ETHERADDRL);
2795 
2796 	/*
2797 	 * Set new interface local address and re-init device.
2798 	 * This is destructive to any other streams attached
2799 	 * to this device.
2800 	 */
2801 	RW_ENTER_WRITER(&hxgep->filter_lock);
2802 	status = hxge_pfc_set_mac_address(hxgep, slot, &eaddr);
2803 	RW_EXIT(&hxgep->filter_lock);
2804 	if (status != HXGE_OK)
2805 		return (status);
2806 
2807 	hxgep->mmac.addrs[slot].set = B_TRUE;
2808 	bcopy(addr, hxgep->mmac.addrs[slot].addr, ETHERADDRL);
2809 	hxgep->mmac.available--;
2810 	if (slot == HXGE_MAC_DEFAULT_ADDR_SLOT)
2811 		hxgep->mmac.addrs[slot].primary = B_TRUE;
2812 
2813 	return (0);
2814 }
2815 
2816 static int
2817 hxge_mmac_find_addr(p_hxge_t hxgep, const uint8_t *addr, int *slot)
2818 {
2819 	int	i, result;
2820 
2821 	for (i = 0; i < hxgep->mmac.total; i++) {
2822 		if (hxgep->mmac.addrs[i].set) {
2823 			result = memcmp(hxgep->mmac.addrs[i].addr,
2824 			    addr, ETHERADDRL);
2825 			if (result == 0) {
2826 				*slot = i;
2827 				return (0);
2828 			}
2829 		}
2830 	}
2831 
2832 	return (EINVAL);
2833 }
2834 
2835 static int
2836 hxge_mmac_unset_addr(p_hxge_t hxgep, int slot)
2837 {
2838 	hxge_status_t	status;
2839 	int		i;
2840 
2841 	status = hxge_pfc_clear_mac_address(hxgep, slot);
2842 	if (status != HXGE_OK)
2843 		return (status);
2844 
2845 	for (i = 0; i < ETHERADDRL; i++)
2846 		hxgep->mmac.addrs[slot].addr[i] = 0;
2847 
2848 	hxgep->mmac.addrs[slot].set = B_FALSE;
2849 	if (slot == HXGE_MAC_DEFAULT_ADDR_SLOT)
2850 		hxgep->mmac.addrs[slot].primary = B_FALSE;
2851 	hxgep->mmac.available++;
2852 
2853 	return (0);
2854 }
2855 
2856 static int
2857 hxge_rx_group_add_mac(void *arg, const uint8_t *mac_addr)
2858 {
2859 	hxge_ring_group_t	*group = arg;
2860 	p_hxge_t		hxgep = group->hxgep;
2861 	int			slot = 0;
2862 
2863 	ASSERT(group->type == MAC_RING_TYPE_RX);
2864 
2865 	MUTEX_ENTER(hxgep->genlock);
2866 
2867 	/*
2868 	 * Find a slot for the address.
2869 	 */
2870 	if (hxge_mmac_get_slot(hxgep, &slot) != 0) {
2871 		MUTEX_EXIT(hxgep->genlock);
2872 		return (ENOSPC);
2873 	}
2874 
2875 	/*
2876 	 * Program the MAC address.
2877 	 */
2878 	if (hxge_mmac_set_addr(hxgep, slot, mac_addr) != 0) {
2879 		MUTEX_EXIT(hxgep->genlock);
2880 		return (ENOSPC);
2881 	}
2882 
2883 	MUTEX_EXIT(hxgep->genlock);
2884 	return (0);
2885 }
2886 
2887 static int
2888 hxge_rx_group_rem_mac(void *arg, const uint8_t *mac_addr)
2889 {
2890 	hxge_ring_group_t	*group = arg;
2891 	p_hxge_t		hxgep = group->hxgep;
2892 	int			rv, slot;
2893 
2894 	ASSERT(group->type == MAC_RING_TYPE_RX);
2895 
2896 	MUTEX_ENTER(hxgep->genlock);
2897 
2898 	if ((rv = hxge_mmac_find_addr(hxgep, mac_addr, &slot)) != 0) {
2899 		MUTEX_EXIT(hxgep->genlock);
2900 		return (rv);
2901 	}
2902 
2903 	if ((rv = hxge_mmac_unset_addr(hxgep, slot)) != 0) {
2904 		MUTEX_EXIT(hxgep->genlock);
2905 		return (rv);
2906 	}
2907 
2908 	MUTEX_EXIT(hxgep->genlock);
2909 	return (0);
2910 }
2911 
2912 static void
2913 hxge_group_get(void *arg, mac_ring_type_t type, int groupid,
2914     mac_group_info_t *infop, mac_group_handle_t gh)
2915 {
2916 	p_hxge_t		hxgep = arg;
2917 	hxge_ring_group_t	*group;
2918 
2919 	ASSERT(type == MAC_RING_TYPE_RX);
2920 
2921 	switch (type) {
2922 	case MAC_RING_TYPE_RX:
2923 		group = &hxgep->rx_groups[groupid];
2924 		group->hxgep = hxgep;
2925 		group->ghandle = gh;
2926 		group->index = groupid;
2927 		group->type = type;
2928 
2929 		infop->mgi_driver = (mac_group_driver_t)group;
2930 		infop->mgi_start = hxge_rx_group_start;
2931 		infop->mgi_stop = hxge_rx_group_stop;
2932 		infop->mgi_addmac = hxge_rx_group_add_mac;
2933 		infop->mgi_remmac = hxge_rx_group_rem_mac;
2934 		infop->mgi_count = HXGE_MAX_RDCS;
2935 		break;
2936 
2937 	case MAC_RING_TYPE_TX:
2938 	default:
2939 		break;
2940 	}
2941 }
2942 
2943 /*
2944  * Callback function for the GLDv3 layer to register all rings.
2945  */
2946 /*ARGSUSED*/
2947 static void
2948 hxge_fill_ring(void *arg, mac_ring_type_t type, const int rg_index,
2949     const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
2950 {
2951 	p_hxge_t	hxgep = arg;
2952 
2953 	switch (type) {
2954 	case MAC_RING_TYPE_TX: {
2955 		p_hxge_ring_handle_t	rhp;
2956 
2957 		ASSERT((index >= 0) && (index < HXGE_MAX_TDCS));
2958 		rhp = &hxgep->tx_ring_handles[index];
2959 		rhp->hxgep = hxgep;
2960 		rhp->index = index;
2961 		rhp->ring_handle = rh;
2962 		infop->mri_driver = (mac_ring_driver_t)rhp;
2963 		infop->mri_start = hxge_tx_ring_start;
2964 		infop->mri_stop = hxge_tx_ring_stop;
2965 		infop->mri_tx = hxge_tx_ring_send;
2966 		break;
2967 	}
2968 	case MAC_RING_TYPE_RX: {
2969 		p_hxge_ring_handle_t    rhp;
2970 		mac_intr_t		hxge_mac_intr;
2971 
2972 		ASSERT((index >= 0) && (index < HXGE_MAX_RDCS));
2973 		rhp = &hxgep->rx_ring_handles[index];
2974 		rhp->hxgep = hxgep;
2975 		rhp->index = index;
2976 		rhp->ring_handle = rh;
2977 
2978 		/*
2979 		 * Entrypoint to enable interrupt (disable poll) and
2980 		 * disable interrupt (enable poll).
2981 		 */
2982 		hxge_mac_intr.mi_handle = (mac_intr_handle_t)rhp;
2983 		hxge_mac_intr.mi_enable =
2984 		    (mac_intr_enable_t)hxge_disable_poll;
2985 		hxge_mac_intr.mi_disable =
2986 		    (mac_intr_disable_t)hxge_enable_poll;
2987 		infop->mri_driver = (mac_ring_driver_t)rhp;
2988 		infop->mri_start = hxge_rx_ring_start;
2989 		infop->mri_stop = hxge_rx_ring_stop;
2990 		infop->mri_intr = hxge_mac_intr;
2991 		infop->mri_poll = hxge_rx_poll;
2992 		break;
2993 	}
2994 	default:
2995 		break;
2996 	}
2997 }
2998 
2999 /*ARGSUSED*/
3000 boolean_t
3001 hxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
3002 {
3003 	p_hxge_t	hxgep = arg;
3004 
3005 	switch (cap) {
3006 	case MAC_CAPAB_HCKSUM: {
3007 		uint32_t	*txflags = cap_data;
3008 
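		/* Advertise partial (one's complement) checksum offload. */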
3009 		*txflags = HCKSUM_INET_PARTIAL;
3010 		break;
3011 	}
3012 
3013 	case MAC_CAPAB_RINGS: {
3014 		mac_capab_rings_t	*cap_rings = cap_data;
3015 
3016 		MUTEX_ENTER(hxgep->genlock);
3017 		if (cap_rings->mr_type == MAC_RING_TYPE_RX) {
3018 			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
3019 			cap_rings->mr_rnum = HXGE_MAX_RDCS;
3020 			cap_rings->mr_rget = hxge_fill_ring;
3021 			cap_rings->mr_gnum = HXGE_MAX_RX_GROUPS;
3022 			cap_rings->mr_gget = hxge_group_get;
3023 			cap_rings->mr_gaddring = NULL;
3024 			cap_rings->mr_gremring = NULL;
3025 		} else {
3026 			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
3027 			cap_rings->mr_rnum = HXGE_MAX_TDCS;
3028 			cap_rings->mr_rget = hxge_fill_ring;
3029 			cap_rings->mr_gnum = 0;
3030 			cap_rings->mr_gget = NULL;
3031 			cap_rings->mr_gaddring = NULL;
3032 			cap_rings->mr_gremring = NULL;
3033 		}
3034 		MUTEX_EXIT(hxgep->genlock);
3035 		break;
3036 	}
3037 
3038 	default:
3039 		return (B_FALSE);
3040 	}
3041 	return (B_TRUE);
3042 }
3043 
3044 static boolean_t
3045 hxge_param_locked(mac_prop_id_t pr_num)
3046 {
3047 	/*
3048 	 * All adv_* parameters are locked (read-only) while
3049 	 * the device is in any sort of loopback mode ...
3050 	 */
3051 	switch (pr_num) {
3052 		case MAC_PROP_ADV_1000FDX_CAP:
3053 		case MAC_PROP_EN_1000FDX_CAP:
3054 		case MAC_PROP_ADV_1000HDX_CAP:
3055 		case MAC_PROP_EN_1000HDX_CAP:
3056 		case MAC_PROP_ADV_100FDX_CAP:
3057 		case MAC_PROP_EN_100FDX_CAP:
3058 		case MAC_PROP_ADV_100HDX_CAP:
3059 		case MAC_PROP_EN_100HDX_CAP:
3060 		case MAC_PROP_ADV_10FDX_CAP:
3061 		case MAC_PROP_EN_10FDX_CAP:
3062 		case MAC_PROP_ADV_10HDX_CAP:
3063 		case MAC_PROP_EN_10HDX_CAP:
3064 		case MAC_PROP_AUTONEG:
3065 		case MAC_PROP_FLOWCTRL:
3066 			return (B_TRUE);
3067 	}
3068 	return (B_FALSE);
3069 }
3070 
3071 /*
3072  * callback functions for set/get of properties
3073  */
3074 static int
3075 hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3076     uint_t pr_valsize, const void *pr_val)
3077 {
3078 	hxge_t		*hxgep = barg;
3079 	p_hxge_stats_t	statsp;
3080 	int		err = 0;
3081 	uint32_t	new_mtu, old_framesize, new_framesize;
3082 
3083 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "==> hxge_m_setprop"));
3084 
3085 	statsp = hxgep->statsp;
3086 	MUTEX_ENTER(hxgep->genlock);
3087 	if (statsp->port_stats.lb_mode != hxge_lb_normal &&
3088 	    hxge_param_locked(pr_num)) {
3089 		/*
3090 		 * All adv_* parameters are locked (read-only)
3091 		 * while the device is in any sort of loopback mode.
3092 		 */
3093 		HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3094 		    "==> hxge_m_setprop: loopback mode: read only"));
3095 		MUTEX_EXIT(hxgep->genlock);
3096 		return (EBUSY);
3097 	}
3098 
3099 	switch (pr_num) {
3100 		/*
3101 		 * These properties either do not exist or are read-only.
3102 		 */
3103 		case MAC_PROP_EN_1000FDX_CAP:
3104 		case MAC_PROP_EN_100FDX_CAP:
3105 		case MAC_PROP_EN_10FDX_CAP:
3106 		case MAC_PROP_EN_1000HDX_CAP:
3107 		case MAC_PROP_EN_100HDX_CAP:
3108 		case MAC_PROP_EN_10HDX_CAP:
3109 		case MAC_PROP_ADV_1000FDX_CAP:
3110 		case MAC_PROP_ADV_1000HDX_CAP:
3111 		case MAC_PROP_ADV_100FDX_CAP:
3112 		case MAC_PROP_ADV_100HDX_CAP:
3113 		case MAC_PROP_ADV_10FDX_CAP:
3114 		case MAC_PROP_ADV_10HDX_CAP:
3115 		case MAC_PROP_STATUS:
3116 		case MAC_PROP_SPEED:
3117 		case MAC_PROP_DUPLEX:
3118 		case MAC_PROP_AUTONEG:
3119 		/*
3120 		 * Flow control is handled in the shared domain and
3121 		 * it is read-only here.
3122 		 */
3123 		case MAC_PROP_FLOWCTRL:
3124 			err = EINVAL;
3125 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3126 			    "==> hxge_m_setprop:  read only property %d",
3127 			    pr_num));
3128 			break;
3129 
3130 		case MAC_PROP_MTU:
3131 			bcopy(pr_val, &new_mtu, sizeof (new_mtu));
3132 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3133 			    "==> hxge_m_setprop: set MTU: %d", new_mtu));
3134 
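			/* A frame is the MTU plus the fixed header overhead. */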
3135 			new_framesize = new_mtu + MTU_TO_FRAME_SIZE;
3136 			if (new_framesize == hxgep->vmac.maxframesize) {
3137 				err = 0;
3138 				break;
3139 			}
3140 
3141 			if (hxgep->hxge_mac_state == HXGE_MAC_STARTED) {
3142 				err = EBUSY;
3143 				break;
3144 			}
3145 
3146 			if (new_framesize < MIN_FRAME_SIZE ||
3147 			    new_framesize > MAX_FRAME_SIZE) {
3148 				err = EINVAL;
3149 				break;
3150 			}
3151 
3152 			old_framesize = hxgep->vmac.maxframesize;
3153 			hxgep->vmac.maxframesize = (uint16_t)new_framesize;
3154 
3155 			if (hxge_vmac_set_framesize(hxgep)) {
3156 				hxgep->vmac.maxframesize =
3157 				    (uint16_t)old_framesize;
3158 				err = EINVAL;
3159 				break;
3160 			}
3161 
3162 			err = mac_maxsdu_update(hxgep->mach, new_mtu);
3163 			if (err) {
3164 				hxgep->vmac.maxframesize =
3165 				    (uint16_t)old_framesize;
3166 				(void) hxge_vmac_set_framesize(hxgep);
3167 			}
3168 
3169 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3170 			    "==> hxge_m_setprop: set MTU: %d maxframe %d",
3171 			    new_mtu, hxgep->vmac.maxframesize));
3172 			break;
3173 
3174 		case MAC_PROP_PRIVATE:
3175 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3176 			    "==> hxge_m_setprop: private property"));
3177 			err = hxge_set_priv_prop(hxgep, pr_name, pr_valsize,
3178 			    pr_val);
3179 			break;
3180 
3181 		default:
3182 			err = ENOTSUP;
3183 			break;
3184 	}
3185 
3186 	MUTEX_EXIT(hxgep->genlock);
3187 
3188 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3189 	    "<== hxge_m_setprop (return %d)", err));
3190 
3191 	return (err);
3192 }
3193 
3194 /* ARGSUSED */
3195 static int
3196 hxge_get_def_val(hxge_t *hxgep, mac_prop_id_t pr_num, uint_t pr_valsize,
3197     void *pr_val)
3198 {
3199 	int		err = 0;
3200 	link_flowctrl_t	fl;
3201 
3202 	switch (pr_num) {
3203 	case MAC_PROP_DUPLEX:
3204 		*(uint8_t *)pr_val = 2;
3205 		break;
3206 	case MAC_PROP_AUTONEG:
3207 		*(uint8_t *)pr_val = 0;
3208 		break;
3209 	case MAC_PROP_FLOWCTRL:
3210 		if (pr_valsize < sizeof (link_flowctrl_t))
3211 			return (EINVAL);
3212 		fl = LINK_FLOWCTRL_TX;
3213 		bcopy(&fl, pr_val, sizeof (fl));
3214 		break;
3215 	default:
3216 		err = ENOTSUP;
3217 		break;
3218 	}
3219 	return (err);
3220 }
3221 
3222 static int
3223 hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3224     uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
3225 {
3226 	hxge_t 		*hxgep = barg;
3227 	p_hxge_stats_t	statsp = hxgep->statsp;
3228 	int		err = 0;
3229 	link_flowctrl_t fl;
3230 	uint64_t	tmp = 0;
3231 	link_state_t	ls;
3232 
3233 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3234 	    "==> hxge_m_getprop: pr_num %d", pr_num));
3235 
3236 	if (pr_valsize == 0)
3237 		return (EINVAL);
3238 
3239 	*perm = MAC_PROP_PERM_RW;
3240 
3241 	if ((pr_flags & MAC_PROP_DEFAULT) && (pr_num != MAC_PROP_PRIVATE)) {
3242 		err = hxge_get_def_val(hxgep, pr_num, pr_valsize, pr_val);
3243 		return (err);
3244 	}
3245 
3246 	bzero(pr_val, pr_valsize);
3247 	switch (pr_num) {
3248 		case MAC_PROP_DUPLEX:
3249 			*perm = MAC_PROP_PERM_READ;
3250 			*(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
3251 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3252 			    "==> hxge_m_getprop: duplex mode %d",
3253 			    *(uint8_t *)pr_val));
3254 			break;
3255 
3256 		case MAC_PROP_SPEED:
3257 			*perm = MAC_PROP_PERM_READ;
3258 			if (pr_valsize < sizeof (uint64_t))
3259 				return (EINVAL);
3260 			tmp = statsp->mac_stats.link_speed * 1000000ull;
3261 			bcopy(&tmp, pr_val, sizeof (tmp));
3262 			break;
3263 
3264 		case MAC_PROP_STATUS:
3265 			*perm = MAC_PROP_PERM_READ;
3266 			if (pr_valsize < sizeof (link_state_t))
3267 				return (EINVAL);
3268 			if (!statsp->mac_stats.link_up)
3269 				ls = LINK_STATE_DOWN;
3270 			else
3271 				ls = LINK_STATE_UP;
3272 			bcopy(&ls, pr_val, sizeof (ls));
3273 			break;
3274 
3275 		case MAC_PROP_FLOWCTRL:
3276 			/*
3277 			 * Flow control is supported by the shared domain and
3278 			 * it is currently transmit only
3279 			 */
3280 			*perm = MAC_PROP_PERM_READ;
3281 			if (pr_valsize < sizeof (link_flowctrl_t))
3282 				return (EINVAL);
3283 			fl = LINK_FLOWCTRL_TX;
3284 			bcopy(&fl, pr_val, sizeof (fl));
3285 			break;
3286 		case MAC_PROP_AUTONEG:
3287 			/* 10G link only and it is not negotiable */
3288 			*perm = MAC_PROP_PERM_READ;
3289 			*(uint8_t *)pr_val = 0;
3290 			break;
3291 		case MAC_PROP_ADV_1000FDX_CAP:
3292 		case MAC_PROP_ADV_100FDX_CAP:
3293 		case MAC_PROP_ADV_10FDX_CAP:
3294 		case MAC_PROP_ADV_1000HDX_CAP:
3295 		case MAC_PROP_ADV_100HDX_CAP:
3296 		case MAC_PROP_ADV_10HDX_CAP:
3297 		case MAC_PROP_EN_1000FDX_CAP:
3298 		case MAC_PROP_EN_100FDX_CAP:
3299 		case MAC_PROP_EN_10FDX_CAP:
3300 		case MAC_PROP_EN_1000HDX_CAP:
3301 		case MAC_PROP_EN_100HDX_CAP:
3302 		case MAC_PROP_EN_10HDX_CAP:
3303 			err = ENOTSUP;
3304 			break;
3305 
3306 		case MAC_PROP_PRIVATE:
3307 			err = hxge_get_priv_prop(hxgep, pr_name, pr_flags,
3308 			    pr_valsize, pr_val);
3309 			break;
3310 		case MAC_PROP_MTU: {
3311 			mac_propval_range_t range;
3312 
3313 			if (!(pr_flags & MAC_PROP_POSSIBLE))
3314 				return (ENOTSUP);
3315 			if (pr_valsize < sizeof (mac_propval_range_t))
3316 				return (EINVAL);
3317 			range.mpr_count = 1;
3318 			range.mpr_type = MAC_PROPVAL_UINT32;
3319 			range.range_uint32[0].mpur_min = MIN_FRAME_SIZE -
3320 			    MTU_TO_FRAME_SIZE;
3321 			range.range_uint32[0].mpur_max = MAX_FRAME_SIZE -
3322 			    MTU_TO_FRAME_SIZE;
3323 			bcopy(&range, pr_val, sizeof (range));
3324 			break;
3325 		}
3326 		default:
3327 			err = EINVAL;
3328 			break;
3329 	}
3330 
3331 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "<== hxge_m_getprop"));
3332 
3333 	return (err);
3334 }
3335 
3336 /* ARGSUSED */
3337 static int
3338 hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_valsize,
3339     const void *pr_val)
3340 {
3341 	p_hxge_param_t	param_arr = hxgep->param_arr;
3342 	int		err = 0;
3343 
3344 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3345 	    "==> hxge_set_priv_prop: name %s (value %s)", pr_name, pr_val));
3346 
3347 	if (pr_val == NULL) {
3348 		return (EINVAL);
3349 	}
3350 
3351 	/* Blanking */
3352 	if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3353 		err = hxge_param_rx_intr_time(hxgep, NULL, NULL,
3354 		    (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_time]);
3355 	} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3356 		err = hxge_param_rx_intr_pkts(hxgep, NULL, NULL,
3357 		    (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_pkts]);
3358 
3359 	/* Classification */
3360 	} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3361 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3362 		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3363 	} else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3364 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3365 		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3366 	} else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3367 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3368 		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3369 	} else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3370 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3371 		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3372 	} else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3373 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3374 		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3375 	} else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3376 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3377 		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3378 	} else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3379 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3380 		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3381 	} else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3382 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3383 		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3384 	} else {
3385 		err = EINVAL;
3386 	}
3387 
3388 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3389 	    "<== hxge_set_priv_prop: err %d", err));
3390 
3391 	return (err);
3392 }
3393 
3394 static int
3395 hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_flags,
3396     uint_t pr_valsize, void *pr_val)
3397 {
3398 	p_hxge_param_t	param_arr = hxgep->param_arr;
3399 	char		valstr[MAXNAMELEN];
3400 	int		err = 0;
3401 	uint_t		strsize;
3402 	int		value = 0;
3403 
3404 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3405 	    "==> hxge_get_priv_prop: property %s", pr_name));
3406 
3407 	if (pr_flags & MAC_PROP_DEFAULT) {
3408 		/* Receive Interrupt Blanking Parameters */
3409 		if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3410 			value = RXDMA_RCR_TO_DEFAULT;
3411 		} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3412 			value = RXDMA_RCR_PTHRES_DEFAULT;
3413 
3414 		/* Classification and Load Distribution Configuration */
3415 		} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0 ||
3416 		    strcmp(pr_name, "_class_opt_ipv4_udp") == 0 ||
3417 		    strcmp(pr_name, "_class_opt_ipv4_ah") == 0 ||
3418 		    strcmp(pr_name, "_class_opt_ipv4_sctp") == 0 ||
3419 		    strcmp(pr_name, "_class_opt_ipv6_tcp") == 0 ||
3420 		    strcmp(pr_name, "_class_opt_ipv6_udp") == 0 ||
3421 		    strcmp(pr_name, "_class_opt_ipv6_ah") == 0 ||
3422 		    strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3423 			value = HXGE_CLASS_TCAM_LOOKUP;
3424 		} else {
3425 			err = EINVAL;
3426 		}
3427 	} else {
3428 		/* Receive Interrupt Blanking Parameters */
3429 		if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3430 			value = hxgep->intr_timeout;
3431 		} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3432 			value = hxgep->intr_threshold;
3433 
3434 		/* Classification and Load Distribution Configuration */
3435 		} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3436 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3437 			    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3438 
3439 			value = (int)param_arr[param_class_opt_ipv4_tcp].value;
3440 		} else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3441 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3442 			    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3443 
3444 			value = (int)param_arr[param_class_opt_ipv4_udp].value;
3445 		} else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3446 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3447 			    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3448 
3449 			value = (int)param_arr[param_class_opt_ipv4_ah].value;
3450 		} else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3451 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3452 			    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3453 
3454 			value = (int)param_arr[param_class_opt_ipv4_sctp].value;
3455 		} else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3456 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3457 			    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3458 
3459 			value = (int)param_arr[param_class_opt_ipv6_tcp].value;
3460 		} else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3461 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3462 			    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3463 
3464 			value = (int)param_arr[param_class_opt_ipv6_udp].value;
3465 		} else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3466 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3467 			    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3468 
3469 			value = (int)param_arr[param_class_opt_ipv6_ah].value;
3470 		} else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3471 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3472 			    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3473 
3474 			value = (int)param_arr[param_class_opt_ipv6_sctp].value;
3475 		} else {
3476 			err = EINVAL;
3477 		}
3478 	}
3479 
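	/* Return the value to the caller formatted as a hex string. */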
3480 	if (err == 0) {
3481 		(void) snprintf(valstr, sizeof (valstr), "0x%x", value);
3482 
3483 		strsize = (uint_t)strlen(valstr);
3484 		if (pr_valsize < strsize) {
3485 			err = ENOBUFS;
3486 		} else {
3487 			(void) strlcpy(pr_val, valstr, pr_valsize);
3488 		}
3489 	}
3490 
3491 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3492 	    "<== hxge_get_priv_prop: return %d", err));
3493 
3494 	return (err);
3495 }
3496 /*
3497  * Module loading and removing entry points.
3498  */
3499 DDI_DEFINE_STREAM_OPS(hxge_dev_ops, nulldev, nulldev, hxge_attach, hxge_detach,
3500     nodev, NULL, D_MP, NULL, NULL);
3501 
3502 extern struct mod_ops mod_driverops;
3503 
3504 #define	HXGE_DESC_VER	"HXGE 10Gb Ethernet Driver"
3505 
3506 /*
3507  * Module linkage information for the kernel.
3508  */
3509 static struct modldrv hxge_modldrv = {
3510 	&mod_driverops,
3511 	HXGE_DESC_VER,
3512 	&hxge_dev_ops
3513 };
3514 
3515 static struct modlinkage modlinkage = {
3516 	MODREV_1, (void *) &hxge_modldrv, NULL
3517 };
3518 
3519 int
3520 _init(void)
3521 {
3522 	int status;
3523 
3524 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
3525 	mac_init_ops(&hxge_dev_ops, "hxge");
3526 	status = ddi_soft_state_init(&hxge_list, sizeof (hxge_t), 0);
3527 	if (status != 0) {
3528 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
3529 		    "failed to init device soft state"));
3530 		mac_fini_ops(&hxge_dev_ops);
3531 		goto _init_exit;
3532 	}
3533 
3534 	status = mod_install(&modlinkage);
3535 	if (status != 0) {
3536 		ddi_soft_state_fini(&hxge_list);
3537 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "Mod install failed"));
3538 		goto _init_exit;
3539 	}
3540 
3541 	MUTEX_INIT(&hxge_common_lock, NULL, MUTEX_DRIVER, NULL);
3542 
3543 _init_exit:
3544 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
3545 
3546 	return (status);
3547 }
3548 
3549 int
3550 _fini(void)
3551 {
3552 	int status;
3553 
3554 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
3555 
3556 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
3557 
3558 	if (hxge_mblks_pending)
3559 		return (EBUSY);
3560 
3561 	status = mod_remove(&modlinkage);
3562 	if (status != DDI_SUCCESS) {
3563 		HXGE_DEBUG_MSG((NULL, MOD_CTL,
3564 		    "Module removal failed 0x%08x", status));
3565 		goto _fini_exit;
3566 	}
3567 
3568 	mac_fini_ops(&hxge_dev_ops);
3569 
3570 	ddi_soft_state_fini(&hxge_list);
3571 
3572 	MUTEX_DESTROY(&hxge_common_lock);
3573 
3574 _fini_exit:
3575 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));
3576 
3577 	return (status);
3578 }
3579 
3580 int
3581 _info(struct modinfo *modinfop)
3582 {
3583 	int status;
3584 
3585 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
3586 	status = mod_info(&modlinkage, modinfop);
3587 	HXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
3588 
3589 	return (status);
3590 }
3591 
3592 /*ARGSUSED*/
3593 hxge_status_t
3594 hxge_add_intrs(p_hxge_t hxgep)
3595 {
3596 	int		intr_types;
3597 	int		type = 0;
3598 	int		ddi_status = DDI_SUCCESS;
3599 	hxge_status_t	status = HXGE_OK;
3600 
3601 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs"));
3602 
3603 	hxgep->hxge_intr_type.intr_registered = B_FALSE;
3604 	hxgep->hxge_intr_type.intr_enabled = B_FALSE;
3605 	hxgep->hxge_intr_type.msi_intx_cnt = 0;
3606 	hxgep->hxge_intr_type.intr_added = 0;
3607 	hxgep->hxge_intr_type.niu_msi_enable = B_FALSE;
3608 	hxgep->hxge_intr_type.intr_type = 0;
3609 
3610 	if (hxge_msi_enable) {
3611 		hxgep->hxge_intr_type.niu_msi_enable = B_TRUE;
3612 	}
3613 
3614 	/* Get the supported interrupt types */
3615 	if ((ddi_status = ddi_intr_get_supported_types(hxgep->dip, &intr_types))
3616 	    != DDI_SUCCESS) {
3617 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_intrs: "
3618 		    "ddi_intr_get_supported_types failed: status 0x%08x",
3619 		    ddi_status));
3620 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3621 	}
3622 
3623 	hxgep->hxge_intr_type.intr_types = intr_types;
3624 
3625 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3626 	    "ddi_intr_get_supported_types: 0x%08x", intr_types));
3627 
3628 	/*
3629 	 * Pick the interrupt type based on hxge_msi_enable:
3630 	 *	1 - prefer MSI
3631 	 *	2 - prefer MSI-X
3632 	 *	others - FIXED (INTx emulation)
3633 	 */
3634 	switch (hxge_msi_enable) {
3635 	default:
3636 		type = DDI_INTR_TYPE_FIXED;
3637 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3638 		    "use fixed (intx emulation) type %08x", type));
3639 		break;
3640 
3641 	case 2:
3642 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3643 		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
3644 		if (intr_types & DDI_INTR_TYPE_MSIX) {
3645 			type = DDI_INTR_TYPE_MSIX;
3646 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3647 			    "==> hxge_add_intrs: "
3648 			    "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3649 		} else if (intr_types & DDI_INTR_TYPE_MSI) {
3650 			type = DDI_INTR_TYPE_MSI;
3651 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3652 			    "==> hxge_add_intrs: "
3653 			    "ddi_intr_get_supported_types: MSI 0x%08x", type));
3654 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
3655 			type = DDI_INTR_TYPE_FIXED;
3656 			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3657 			    "ddi_intr_get_supported_types: FIXED 0x%08x", type));
3658 		}
3659 		break;
3660 
3661 	case 1:
3662 		if (intr_types & DDI_INTR_TYPE_MSI) {
3663 			type = DDI_INTR_TYPE_MSI;
3664 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3665 			    "==> hxge_add_intrs: "
3666 			    "ddi_intr_get_supported_types: MSI 0x%08x", type));
3667 		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
3668 			type = DDI_INTR_TYPE_MSIX;
3669 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3670 			    "==> hxge_add_intrs: "
3671 			    "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3672 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
3673 			type = DDI_INTR_TYPE_FIXED;
3674 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3675 			    "==> hxge_add_intrs: "
3676 			    "ddi_intr_get_supported_types: FIXED 0x%08x", type));
3677 		}
3678 	}
3679 
3680 	hxgep->hxge_intr_type.intr_type = type;
3681 	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
3682 	    type == DDI_INTR_TYPE_FIXED) &&
3683 	    hxgep->hxge_intr_type.niu_msi_enable) {
3684 		if ((status = hxge_add_intrs_adv(hxgep)) != DDI_SUCCESS) {
3685 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3686 			    " hxge_add_intrs: "
3687 			    " hxge_add_intrs_adv failed: status 0x%08x",
3688 			    status));
3689 			return (status);
3690 		} else {
3691 			HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_intrs: "
3692 			    "interrupts registered : type %d", type));
3693 			hxgep->hxge_intr_type.intr_registered = B_TRUE;
3694 
3695 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
3696 			    "\nAdded advanced hxge add_intr_adv "
3697 			    "intr type 0x%x\n", type));
3698 
3699 			return (status);
3700 		}
3701 	}
3702 
3703 	if (!hxgep->hxge_intr_type.intr_registered) {
3704 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3705 		    "==> hxge_add_intrs: failed to register interrupts"));
3706 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3707 	}
3708 
3709 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs"));
3710 
3711 	return (status);
3712 }
3713 
3714 /*ARGSUSED*/
3715 static hxge_status_t
3716 hxge_add_intrs_adv(p_hxge_t hxgep)
3717 {
3718 	int		intr_type;
3719 	p_hxge_intr_t	intrp;
3720 	hxge_status_t	status;
3721 
3722 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv"));
3723 
3724 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3725 	intr_type = intrp->intr_type;
3726 
3727 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv: type 0x%x",
3728 	    intr_type));
3729 
3730 	switch (intr_type) {
3731 	case DDI_INTR_TYPE_MSI:		/* 0x2 */
3732 	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
3733 		status = hxge_add_intrs_adv_type(hxgep, intr_type);
3734 		break;
3735 
3736 	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
3737 		status = hxge_add_intrs_adv_type_fix(hxgep, intr_type);
3738 		break;
3739 
3740 	default:
3741 		status = HXGE_ERROR;
3742 		break;
3743 	}
3744 
3745 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv"));
3746 
3747 	return (status);
3748 }
3749 
3750 /*ARGSUSED*/
3751 static hxge_status_t
3752 hxge_add_intrs_adv_type(p_hxge_t hxgep, uint32_t int_type)
3753 {
3754 	dev_info_t	*dip = hxgep->dip;
3755 	p_hxge_ldg_t	ldgp;
3756 	p_hxge_intr_t	intrp;
3757 	uint_t		*inthandler;
3758 	void		*arg1, *arg2;
3759 	int		behavior;
3760 	int		nintrs, navail;
3761 	int		nactual, nrequired, nrequest;
3762 	int		inum = 0;
3763 	int		loop = 0;
3764 	int		x, y;
3765 	int		ddi_status = DDI_SUCCESS;
3766 	hxge_status_t	status = HXGE_OK;
3767 
3768 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type"));
3769 
3770 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3771 
3772 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3773 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3774 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3775 		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
3776 		    "nintrs: %d", ddi_status, nintrs));
3777 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3778 	}
3779 
3780 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
3781 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
3782 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3783 		    "ddi_intr_get_navail() failed, status: 0x%x, "
3784 		    "navail: %d", ddi_status, navail));
3785 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3786 	}
3787 
3788 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3789 	    "ddi_intr_get_navail() returned: intr type %d nintrs %d, navail %d",
3790 	    int_type, nintrs, navail));
3791 
3792 	/* PSARC/2007/453 MSI-X interrupt limit override */
3793 	if (int_type == DDI_INTR_TYPE_MSIX) {
3794 		nrequest = hxge_create_msi_property(hxgep);
3795 		if (nrequest < navail) {
3796 			navail = nrequest;
3797 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3798 			    "hxge_add_intrs_adv_type: nintrs %d "
3799 			    "navail %d (nrequest %d)",
3800 			    nintrs, navail, nrequest));
3801 		}
3802 	}
3803 
3804 	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
3805 		/* MSI must be power of 2 */
3806 		if ((navail & 16) == 16) {
3807 			navail = 16;
3808 		} else if ((navail & 8) == 8) {
3809 			navail = 8;
3810 		} else if ((navail & 4) == 4) {
3811 			navail = 4;
3812 		} else if ((navail & 2) == 2) {
3813 			navail = 2;
3814 		} else {
3815 			navail = 1;
3816 		}
3817 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3818 		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
3819 		    "navail %d", nintrs, navail));
3820 	}
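	/*
	 * For the navail values expected here (less than 32), the ladder
	 * above rounds navail down to the largest power of two it contains
	 * (16, 8, 4, 2 or 1); e.g. navail == 5 becomes 4.
	 */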
3821 
3822 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3823 	    "requesting: intr type %d nintrs %d, navail %d",
3824 	    int_type, nintrs, navail));
3825 
3826 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
3827 	    DDI_INTR_ALLOC_NORMAL);
3828 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
3829 	intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP);
3830 
3831 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
3832 	    navail, &nactual, behavior);
3833 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
3834 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3835 		    " ddi_intr_alloc() failed: %d", ddi_status));
3836 		kmem_free(intrp->htable, intrp->intr_size);
3837 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3838 	}
3839 
3840 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3841 	    "ddi_intr_alloc() returned: navail %d nactual %d",
3842 	    navail, nactual));
3843 
3844 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
3845 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
3846 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3847 		    " ddi_intr_get_pri() failed: %d", ddi_status));
3848 		/* Free already allocated interrupts */
3849 		for (y = 0; y < nactual; y++) {
3850 			(void) ddi_intr_free(intrp->htable[y]);
3851 		}
3852 
3853 		kmem_free(intrp->htable, intrp->intr_size);
3854 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3855 	}
3856 
3857 	nrequired = 0;
3858 	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
3859 	if (status != HXGE_OK) {
3860 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3861 		    "hxge_add_intrs_adv_type: hxge_ldgv_init "
3862 		    "failed: 0x%x", status));
3863 		/* Free already allocated interrupts */
3864 		for (y = 0; y < nactual; y++) {
3865 			(void) ddi_intr_free(intrp->htable[y]);
3866 		}
3867 
3868 		kmem_free(intrp->htable, intrp->intr_size);
3869 		return (status);
3870 	}
3871 
3872 	ldgp = hxgep->ldgvp->ldgp;
3873 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3874 	    "After hxge_ldgv_init(): nreq %d nactual %d", nrequired, nactual));
3875 
3876 	if (nactual < nrequired)
3877 		loop = nactual;
3878 	else
3879 		loop = nrequired;
3880 
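	/*
	 * Register one handler per logical device group that receives a
	 * vector: the group's 1-1 handler when it contains a single logical
	 * device, or the shared system handler when it contains several.
	 */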
3881 	for (x = 0; x < loop; x++, ldgp++) {
3882 		ldgp->vector = (uint8_t)x;
3883 		arg1 = ldgp->ldvp;
3884 		arg2 = hxgep;
3885 		if (ldgp->nldvs == 1) {
3886 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
3887 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3888 			    "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
3889 			    "1-1 int handler (entry %d)\n",
3890 			    arg1, arg2, x));
3891 		} else if (ldgp->nldvs > 1) {
3892 			inthandler = (uint_t *)ldgp->sys_intr_handler;
3893 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3894 			    "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
3895 			    "nldevs %d int handler (entry %d)\n",
3896 			    arg1, arg2, ldgp->nldvs, x));
3897 		}
3898 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3899 		    "==> hxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
3900 		    "htable 0x%llx", x, intrp->htable[x]));
3901 
3902 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
3903 		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
3904 		    DDI_SUCCESS) {
3905 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3906 			    "==> hxge_add_intrs_adv_type: failed #%d "
3907 			    "status 0x%x", x, ddi_status));
3908 			for (y = 0; y < intrp->intr_added; y++) {
3909 				(void) ddi_intr_remove_handler(
3910 				    intrp->htable[y]);
3911 			}
3912 
3913 			/* Free already allocated intr */
3914 			for (y = 0; y < nactual; y++) {
3915 				(void) ddi_intr_free(intrp->htable[y]);
3916 			}
3917 			kmem_free(intrp->htable, intrp->intr_size);
3918 
3919 			(void) hxge_ldgv_uninit(hxgep);
3920 
3921 			return (HXGE_ERROR | HXGE_DDI_FAILED);
3922 		}
3923 
3924 		intrp->intr_added++;
3925 	}
3926 	intrp->msi_intx_cnt = nactual;
3927 
3928 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3929 	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
3930 	    navail, nactual, intrp->msi_intx_cnt, intrp->intr_added));
3931 
3932 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
3933 	(void) hxge_intr_ldgv_init(hxgep);
3934 
3935 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type"));
3936 
3937 	return (status);
3938 }
3939 
3940 /*ARGSUSED*/
3941 static hxge_status_t
3942 hxge_add_intrs_adv_type_fix(p_hxge_t hxgep, uint32_t int_type)
3943 {
3944 	dev_info_t	*dip = hxgep->dip;
3945 	p_hxge_ldg_t	ldgp;
3946 	p_hxge_intr_t	intrp;
3947 	uint_t		*inthandler;
3948 	void		*arg1, *arg2;
3949 	int		behavior;
3950 	int		nintrs, navail;
3951 	int		nactual, nrequired;
3952 	int		inum = 0;
3953 	int		x, y;
3954 	int		ddi_status = DDI_SUCCESS;
3955 	hxge_status_t	status = HXGE_OK;
3956 
3957 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type_fix"));
3958 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3959 
3960 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3961 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3962 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3963 		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
3964 		    "nintrs: %d", ddi_status, nintrs));
3965 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3966 	}
3967 
3968 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
3969 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
3970 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3971 		    "ddi_intr_get_navail() failed, status: 0x%x, "
3972 		    "navail: %d", ddi_status, navail));
3973 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3974 	}
3975 
3976 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3977 	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
3978 	    nintrs, navail));
3979 
3980 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
3981 	    DDI_INTR_ALLOC_NORMAL);
3982 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
3983 	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
3984 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
3985 	    navail, &nactual, behavior);
3986 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
3987 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3988 		    " ddi_intr_alloc() failed: %d", ddi_status));
3989 		kmem_free(intrp->htable, intrp->intr_size);
3990 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3991 	}
3992 
3993 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
3994 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
3995 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3996 		    " ddi_intr_get_pri() failed: %d", ddi_status));
3997 		/* Free already allocated interrupts */
3998 		for (y = 0; y < nactual; y++) {
3999 			(void) ddi_intr_free(intrp->htable[y]);
4000 		}
4001 
4002 		kmem_free(intrp->htable, intrp->intr_size);
4003 		return (HXGE_ERROR | HXGE_DDI_FAILED);
4004 	}
4005 
4006 	nrequired = 0;
4007 	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
4008 	if (status != HXGE_OK) {
4009 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4010 		    "hxge_add_intrs_adv_type_fix:hxge_ldgv_init "
4011 		    "failed: 0x%x", status));
4012 		/* Free already allocated interrupts */
4013 		for (y = 0; y < nactual; y++) {
4014 			(void) ddi_intr_free(intrp->htable[y]);
4015 		}
4016 
4017 		kmem_free(intrp->htable, intrp->intr_size);
4018 		return (status);
4019 	}
4020 
4021 	ldgp = hxgep->ldgvp->ldgp;
4022 	for (x = 0; x < nrequired; x++, ldgp++) {
4023 		ldgp->vector = (uint8_t)x;
4024 		arg1 = ldgp->ldvp;
4025 		arg2 = hxgep;
4026 		if (ldgp->nldvs == 1) {
4027 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
4028 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
4029 			    "hxge_add_intrs_adv_type_fix: "
4030 			    "1-1 int handler(%d) ldg %d ldv %d "
4031 			    "arg1 $%p arg2 $%p\n",
4032 			    x, ldgp->ldg, ldgp->ldvp->ldv, arg1, arg2));
4033 		} else if (ldgp->nldvs > 1) {
4034 			inthandler = (uint_t *)ldgp->sys_intr_handler;
4035 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
4036 			    "hxge_add_intrs_adv_type_fix: "
4037 			    "shared int handler(%d) nldvs %d ldg %d ldv %d "
4038 			    "arg1 0x%016llx arg2 0x%016llx\n",
4039 			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
4040 			    arg1, arg2));
4041 		}
4042 
4043 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
4044 		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
4045 		    DDI_SUCCESS) {
4046 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4047 			    "==> hxge_add_intrs_adv_type_fix: failed #%d "
4048 			    "status 0x%x", x, ddi_status));
4049 			for (y = 0; y < intrp->intr_added; y++) {
4050 				(void) ddi_intr_remove_handler(
4051 				    intrp->htable[y]);
4052 			}
4053 			for (y = 0; y < nactual; y++) {
4054 				(void) ddi_intr_free(intrp->htable[y]);
4055 			}
4056 			/* Free already allocated intr */
4057 			kmem_free(intrp->htable, intrp->intr_size);
4058 
4059 			(void) hxge_ldgv_uninit(hxgep);
4060 
4061 			return (HXGE_ERROR | HXGE_DDI_FAILED);
4062 		}
4063 		intrp->intr_added++;
4064 	}
4065 
4066 	intrp->msi_intx_cnt = nactual;
4067 
4068 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
4069 
4070 	status = hxge_intr_ldgv_init(hxgep);
4071 
4072 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type_fix"));
4073 
4074 	return (status);
4075 }
4076 
4077 /*ARGSUSED*/
4078 static void
4079 hxge_remove_intrs(p_hxge_t hxgep)
4080 {
4081 	int		i, inum;
4082 	p_hxge_intr_t	intrp;
4083 
4084 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs"));
4085 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4086 	if (!intrp->intr_registered) {
4087 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
4088 		    "<== hxge_remove_intrs: interrupts not registered"));
4089 		return;
4090 	}
4091 
4092 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs:advanced"));
4093 
4094 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4095 		(void) ddi_intr_block_disable(intrp->htable,
4096 		    intrp->intr_added);
4097 	} else {
4098 		for (i = 0; i < intrp->intr_added; i++) {
4099 			(void) ddi_intr_disable(intrp->htable[i]);
4100 		}
4101 	}
4102 
4103 	for (inum = 0; inum < intrp->intr_added; inum++) {
4104 		if (intrp->htable[inum]) {
4105 			(void) ddi_intr_remove_handler(intrp->htable[inum]);
4106 		}
4107 	}
4108 
4109 	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
4110 		if (intrp->htable[inum]) {
4111 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
4112 			    "hxge_remove_intrs: ddi_intr_free inum %d "
4113 			    "msi_intx_cnt %d intr_added %d",
4114 			    inum, intrp->msi_intx_cnt, intrp->intr_added));
4115 
4116 			(void) ddi_intr_free(intrp->htable[inum]);
4117 		}
4118 	}
4119 
4120 	kmem_free(intrp->htable, intrp->intr_size);
4121 	intrp->intr_registered = B_FALSE;
4122 	intrp->intr_enabled = B_FALSE;
4123 	intrp->msi_intx_cnt = 0;
4124 	intrp->intr_added = 0;
4125 
4126 	(void) hxge_ldgv_uninit(hxgep);
4127 
4128 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_intrs"));
4129 }
4130 
4131 /*ARGSUSED*/
4132 void
4133 hxge_intrs_enable(p_hxge_t hxgep)
4134 {
4135 	p_hxge_intr_t	intrp;
4136 	int		i;
4137 	int		status;
4138 
4139 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable"));
4140 
4141 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4142 
4143 	if (!intrp->intr_registered) {
4144 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_intrs_enable: "
4145 		    "interrupts are not registered"));
4146 		return;
4147 	}
4148 
4149 	if (intrp->intr_enabled) {
4150 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
4151 		    "<== hxge_intrs_enable: already enabled"));
4152 		return;
4153 	}
4154 
4155 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4156 		status = ddi_intr_block_enable(intrp->htable,
4157 		    intrp->intr_added);
4158 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
4159 		    "block enable - status 0x%x total inums #%d\n",
4160 		    status, intrp->intr_added));
4161 	} else {
4162 		for (i = 0; i < intrp->intr_added; i++) {
4163 			status = ddi_intr_enable(intrp->htable[i]);
4164 			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
4165 			    "ddi_intr_enable:enable - status 0x%x "
4166 			    "total inums %d enable inum #%d\n",
4167 			    status, intrp->intr_added, i));
4168 			if (status == DDI_SUCCESS) {
4169 				intrp->intr_enabled = B_TRUE;
4170 			}
4171 		}
4172 	}
4173 
4174 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_enable"));
4175 }
4176 
4177 /*ARGSUSED*/
4178 static void
4179 hxge_intrs_disable(p_hxge_t hxgep)
4180 {
4181 	p_hxge_intr_t	intrp;
4182 	int		i;
4183 
4184 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_disable"));
4185 
4186 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4187 
4188 	if (!intrp->intr_registered) {
4189 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable: "
4190 		    "interrupts are not registered"));
4191 		return;
4192 	}
4193 
4194 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4195 		(void) ddi_intr_block_disable(intrp->htable,
4196 		    intrp->intr_added);
4197 	} else {
4198 		for (i = 0; i < intrp->intr_added; i++) {
4199 			(void) ddi_intr_disable(intrp->htable[i]);
4200 		}
4201 	}
4202 
4203 	intrp->intr_enabled = B_FALSE;
4204 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable"));
4205 }
4206 
4207 static hxge_status_t
4208 hxge_mac_register(p_hxge_t hxgep)
4209 {
4210 	mac_register_t	*macp;
4211 	int		status;
4212 
4213 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_mac_register"));
4214 
4215 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
4216 		return (HXGE_ERROR);
4217 
4218 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
4219 	macp->m_driver = hxgep;
4220 	macp->m_dip = hxgep->dip;
4221 	macp->m_src_addr = hxgep->ouraddr.ether_addr_octet;
4222 	macp->m_callbacks = &hxge_m_callbacks;
4223 	macp->m_min_sdu = 0;
4224 	macp->m_max_sdu = hxgep->vmac.maxframesize - MTU_TO_FRAME_SIZE;
4225 	macp->m_margin = VLAN_TAGSZ;
4226 	macp->m_priv_props = hxge_priv_props;
4227 	macp->m_priv_prop_count = HXGE_MAX_PRIV_PROPS;
4228 	macp->m_v12n = MAC_VIRT_LEVEL1;
4229 
4230 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
4231 	    "hxge_mac_register: ether addr is %x:%x:%x:%x:%x:%x",
4232 	    macp->m_src_addr[0],
4233 	    macp->m_src_addr[1],
4234 	    macp->m_src_addr[2],
4235 	    macp->m_src_addr[3],
4236 	    macp->m_src_addr[4],
4237 	    macp->m_src_addr[5]));
4238 
4239 	status = mac_register(macp, &hxgep->mach);
4240 	mac_free(macp);
4241 
4242 	if (status != 0) {
4243 		cmn_err(CE_WARN,
4244 		    "hxge_mac_register failed (status %d instance %d)",
4245 		    status, hxgep->instance);
4246 		return (HXGE_ERROR);
4247 	}
4248 
4249 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_mac_register success "
4250 	    "(instance %d)", hxgep->instance));
4251 
4252 	return (HXGE_OK);
4253 }
4254 
4255 static int
4256 hxge_init_common_dev(p_hxge_t hxgep)
4257 {
4258 	p_hxge_hw_list_t	hw_p;
4259 	dev_info_t		*p_dip;
4260 
4261 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_init_common_dev"));
4262 
4263 	p_dip = hxgep->p_dip;
4264 	MUTEX_ENTER(&hxge_common_lock);
4265 
4266 	/*
4267 	 * Loop through existing per Hydra hardware list.
4268 	 */
4269 	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
4270 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4271 		    "==> hxge_init_common_dev: hw_p $%p parent dip $%p",
4272 		    hw_p, p_dip));
4273 		if (hw_p->parent_devp == p_dip) {
4274 			hxgep->hxge_hw_p = hw_p;
4275 			hw_p->ndevs++;
4276 			hw_p->hxge_p = hxgep;
4277 			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4278 			    "==> hxge_init_common_dev: "
4279 			    "hw_p $%p parent dip $%p ndevs %d (found)",
4280 			    hw_p, p_dip, hw_p->ndevs));
4281 			break;
4282 		}
4283 	}
4284 
4285 	if (hw_p == NULL) {
4286 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4287 		    "==> hxge_init_common_dev: parent dip $%p (new)", p_dip));
4288 		hw_p = kmem_zalloc(sizeof (hxge_hw_list_t), KM_SLEEP);
4289 		hw_p->parent_devp = p_dip;
4290 		hw_p->magic = HXGE_MAGIC;
4291 		hxgep->hxge_hw_p = hw_p;
4292 		hw_p->ndevs++;
4293 		hw_p->hxge_p = hxgep;
4294 		hw_p->next = hxge_hw_list;
4295 
4296 		MUTEX_INIT(&hw_p->hxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
4297 		MUTEX_INIT(&hw_p->hxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
4298 		MUTEX_INIT(&hw_p->hxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
4299 
4300 		hxge_hw_list = hw_p;
4301 	}
4302 	MUTEX_EXIT(&hxge_common_lock);
4303 	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4304 	    "==> hxge_init_common_dev (hxge_hw_list) $%p", hxge_hw_list));
4305 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_init_common_dev"));
4306 
4307 	return (HXGE_OK);
4308 }
4309 
4310 static void
4311 hxge_uninit_common_dev(p_hxge_t hxgep)
4312 {
4313 	p_hxge_hw_list_t	hw_p, h_hw_p;
4314 	dev_info_t		*p_dip;
4315 
4316 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_uninit_common_dev"));
4317 	if (hxgep->hxge_hw_p == NULL) {
4318 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4319 		    "<== hxge_uninit_common_dev (no common)"));
4320 		return;
4321 	}
4322 
4323 	MUTEX_ENTER(&hxge_common_lock);
4324 	h_hw_p = hxge_hw_list;
4325 	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
4326 		p_dip = hw_p->parent_devp;
4327 		if (hxgep->hxge_hw_p == hw_p && p_dip == hxgep->p_dip &&
4328 		    hxgep->hxge_hw_p->magic == HXGE_MAGIC &&
4329 		    hw_p->magic == HXGE_MAGIC) {
4330 			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4331 			    "==> hxge_uninit_common_dev: "
4332 			    "hw_p $%p parent dip $%p ndevs %d (found)",
4333 			    hw_p, p_dip, hw_p->ndevs));
4334 
4335 			hxgep->hxge_hw_p = NULL;
4336 			if (hw_p->ndevs) {
4337 				hw_p->ndevs--;
4338 			}
4339 			hw_p->hxge_p = NULL;
4340 			if (!hw_p->ndevs) {
4341 				MUTEX_DESTROY(&hw_p->hxge_vlan_lock);
4342 				MUTEX_DESTROY(&hw_p->hxge_tcam_lock);
4343 				MUTEX_DESTROY(&hw_p->hxge_cfg_lock);
4344 				HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4345 				    "==> hxge_uninit_common_dev: "
4346 				    "hw_p $%p parent dip $%p ndevs %d (last)",
4347 				    hw_p, p_dip, hw_p->ndevs));
4348 
4349 				if (hw_p == hxge_hw_list) {
4350 					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4351 					    "==> hxge_uninit_common_dev:"
4352 					    "remove head "
4353 					    "hw_p $%p parent dip $%p "
4354 					    "ndevs %d (head)",
4355 					    hw_p, p_dip, hw_p->ndevs));
4356 					hxge_hw_list = hw_p->next;
4357 				} else {
4358 					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4359 					    "==> hxge_uninit_common_dev:"
4360 					    "remove middle "
4361 					    "hw_p $%p parent dip $%p "
4362 					    "ndevs %d (middle)",
4363 					    hw_p, p_dip, hw_p->ndevs));
4364 					h_hw_p->next = hw_p->next;
4365 				}
4366 
4367 				KMEM_FREE(hw_p, sizeof (hxge_hw_list_t));
4368 			}
4369 			break;
4370 		} else {
4371 			h_hw_p = hw_p;
4372 		}
4373 	}
4374 
4375 	MUTEX_EXIT(&hxge_common_lock);
4376 	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4377 	    "==> hxge_uninit_common_dev (hxge_hw_list) $%p", hxge_hw_list));
4378 
4379 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_uninit_common_dev"));
4380 }
4381 
4382 #define	HXGE_MSIX_ENTRIES		32
4383 #define	HXGE_MSIX_WAIT_COUNT		10
4384 #define	HXGE_MSIX_PARITY_CHECK_COUNT	30
4385 
4386 static void
4387 hxge_link_poll(void *arg)
4388 {
4389 	p_hxge_t		hxgep = (p_hxge_t)arg;
4390 	hpi_handle_t		handle;
4391 	cip_link_stat_t		link_stat;
4392 	hxge_timeout		*to = &hxgep->timeout;
4393 
4394 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
4395 	HXGE_REG_RD32(handle, CIP_LINK_STAT, &link_stat.value);
4396 
4397 	if (to->report_link_status ||
4398 	    (to->link_status != link_stat.bits.xpcs0_link_up)) {
4399 		to->link_status = link_stat.bits.xpcs0_link_up;
4400 		to->report_link_status = B_FALSE;
4401 
4402 		if (link_stat.bits.xpcs0_link_up) {
4403 			hxge_link_update(hxgep, LINK_STATE_UP);
4404 		} else {
4405 			hxge_link_update(hxgep, LINK_STATE_DOWN);
4406 		}
4407 	}
4408 
4409 	if (hxgep->msix_count++ >= HXGE_MSIX_PARITY_CHECK_COUNT) {
4410 		hxgep->msix_count = 0;
4411 		hxgep->msix_index++;
4412 		if (hxgep->msix_index >= HXGE_MSIX_ENTRIES)
4413 			hxgep->msix_index = 0;
4414 		hxge_check_1entry_msix_table(hxgep, hxgep->msix_index);
4415 	}
4416 
4417 	/* Restart the link status timer to check the link status */
4418 	MUTEX_ENTER(&to->lock);
4419 	to->id = timeout(hxge_link_poll, arg, to->ticks);
4420 	MUTEX_EXIT(&to->lock);
4421 }
4422 
4423 static void
4424 hxge_link_update(p_hxge_t hxgep, link_state_t state)
4425 {
4426 	p_hxge_stats_t		statsp = (p_hxge_stats_t)hxgep->statsp;
4427 
4428 	mac_link_update(hxgep->mach, state);
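	/*
	 * Record 10 Gb/s, full duplex (link_duplex 2) while the link is up;
	 * clear the link statistics when it is down.
	 */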
4429 	if (state == LINK_STATE_UP) {
4430 		statsp->mac_stats.link_speed = 10000;
4431 		statsp->mac_stats.link_duplex = 2;
4432 		statsp->mac_stats.link_up = 1;
4433 	} else {
4434 		statsp->mac_stats.link_speed = 0;
4435 		statsp->mac_stats.link_duplex = 0;
4436 		statsp->mac_stats.link_up = 0;
4437 	}
4438 }
4439 
4440 static void
4441 hxge_msix_init(p_hxge_t hxgep)
4442 {
4443 	uint32_t 		data0;
4444 	uint32_t 		data1;
4445 	uint32_t 		data2;
4446 	int			i;
4447 	uint32_t		msix_entry0;
4448 	uint32_t		msix_entry1;
4449 	uint32_t		msix_entry2;
4450 	uint32_t		msix_entry3;
4451 
4452 	/* Access the MSI-X table through the MSI-X BAR rather than indirectly. */
4453 	for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
4454 		data0 = 0xffffffff - i;
4455 		data1 = 0xffffffff - i - 1;
4456 		data2 = 0xffffffff - i - 2;
4457 
4458 		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16, data0);
4459 		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 4, data1);
4460 		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 8, data2);
4461 	}
4462 
4463 	/* Initialize ram data out buffer. */
4464 	for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
4465 		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16, &msix_entry0);
4466 		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 4, &msix_entry1);
4467 		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 8, &msix_entry2);
4468 		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 12, &msix_entry3);
4469 	}
4470 }
4471 
4472 static void
4473 hxge_store_msix_table(p_hxge_t hxgep)
4474 {
4475 	int			i;
4476 	uint32_t		msix_entry0;
4477 	uint32_t		msix_entry1;
4478 	uint32_t		msix_entry2;
4479 
4480 	for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
4481 		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16, &msix_entry0);
4482 		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 4,
4483 		    &msix_entry1);
4484 		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 8,
4485 		    &msix_entry2);
4486 
4487 		hxgep->msix_table[i][0] = msix_entry0;
4488 		hxgep->msix_table[i][1] = msix_entry1;
4489 		hxgep->msix_table[i][2] = msix_entry2;
4490 	}
4491 }
4492 
4493 static void
4494 hxge_check_1entry_msix_table(p_hxge_t hxgep, int i)
4495 {
4496 	uint32_t		msix_entry0;
4497 	uint32_t		msix_entry1;
4498 	uint32_t		msix_entry2;
4499 	p_hxge_peu_sys_stats_t	statsp;
4500 
4501 	statsp = (p_hxge_peu_sys_stats_t)&hxgep->statsp->peu_sys_stats;
4502 
4503 	HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16, &msix_entry0);
4504 	HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 4, &msix_entry1);
4505 	HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 8, &msix_entry2);
4506 
4507 	hxgep->msix_table_check[i][0] = msix_entry0;
4508 	hxgep->msix_table_check[i][1] = msix_entry1;
4509 	hxgep->msix_table_check[i][2] = msix_entry2;
4510 
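	/*
	 * Compare the entry just read back with the snapshot taken by
	 * hxge_store_msix_table(); any mismatch is counted as an MSI-X
	 * table parity error and, on its first occurrence, reported
	 * through FMA.
	 */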
4511 	if ((hxgep->msix_table[i][0] != hxgep->msix_table_check[i][0]) ||
4512 	    (hxgep->msix_table[i][1] != hxgep->msix_table_check[i][1]) ||
4513 	    (hxgep->msix_table[i][2] != hxgep->msix_table_check[i][2])) {
4514 		statsp->eic_msix_parerr++;
4515 		if (statsp->eic_msix_parerr == 1) {
4516 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4517 			    "==> hxge_check_1entry_msix_table: "
4518 			    "eic_msix_parerr at index: %d", i));
4519 			HXGE_FM_REPORT_ERROR(hxgep, NULL,
4520 			    HXGE_FM_EREPORT_PEU_ERR);
4521 		}
4522 	}
4523 }
4524 
4525 /*
4526  * This function supports the PSARC/2007/453
4527  * MSI-X interrupt limit override.
4528  */
4529 static int
4530 hxge_create_msi_property(p_hxge_t hxgep)
4531 {
4532 	int	nmsi;
4533 	extern	int ncpus;
4534 
4535 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==>hxge_create_msi_property"));
4536 
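	/*
	 * Create the (zero-length) "#msix-request" property on this node;
	 * per PSARC/2007/453 this is understood to be the hint that lets
	 * the DDI interrupt framework raise the default MSI-X vector limit
	 * for this device.
	 */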
4537 	(void) ddi_prop_create(DDI_DEV_T_NONE, hxgep->dip,
4538 	    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
4539 	/*
4540 	 * The maximum number of MSI-X vectors requested is 8
4541 	 * (HXGE_MSIX_REQUEST_10G).  If there are fewer than 8 CPUs,
4542 	 * request one MSI-X vector per CPU instead.
4543 	 */
4544 	if (ncpus >= HXGE_MSIX_REQUEST_10G) {
4545 		nmsi = HXGE_MSIX_REQUEST_10G;
4546 	} else {
4547 		nmsi = ncpus;
4548 	}
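	/*
	 * Equivalently (illustrative sketch; MIN() from <sys/sysmacros.h>):
	 *	nmsi = MIN(ncpus, HXGE_MSIX_REQUEST_10G);
	 */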
4549 
4550 	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4551 	    "==>hxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
4552 	    ddi_prop_exists(DDI_DEV_T_NONE, hxgep->dip,
4553 	    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
4554 
4555 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<==hxge_create_msi_property"));
4556 	return (nmsi);
4557 }
4558