xref: /illumos-gate/usr/src/uts/common/io/hxge/hxge_main.c (revision 5279807d7e1818eac6f90ac640b7a89cdb37522d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * SunOS MT STREAMS Hydra 10Gb Ethernet Device Driver.
28  */
29 #include <hxge_impl.h>
30 #include <hxge_pfc.h>
31 
32 /*
33  * PSARC/2007/453 MSI-X interrupt limit override
34  * (This PSARC case is limited to MSI-X vectors
35  *  and SPARC platforms only).
36  */
37 #if defined(_BIG_ENDIAN)
38 uint32_t hxge_msi_enable = 2;
39 #else
40 uint32_t hxge_msi_enable = 2;
41 #endif
42 
43 /*
44  * Globals: tunable parameters (/etc/system or adb)
45  *
46  */
47 uint32_t hxge_rbr_size = HXGE_RBR_RBB_DEFAULT;
48 uint32_t hxge_rbr_spare_size = 0;
49 uint32_t hxge_rcr_size = HXGE_RCR_DEFAULT;
50 uint32_t hxge_tx_ring_size = HXGE_TX_RING_DEFAULT;
51 uint32_t hxge_bcopy_thresh = TX_BCOPY_MAX;
52 uint32_t hxge_dvma_thresh = TX_FASTDVMA_MIN;
53 uint32_t hxge_dma_stream_thresh = TX_STREAM_MIN;
54 uint32_t hxge_jumbo_frame_size = MAX_FRAME_SIZE;
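
/*
 * Example (illustrative only, not a recommendation): the tunables above can
 * be overridden from /etc/system before boot, e.g.
 *
 *	set hxge:hxge_rbr_size = 2048
 *	set hxge:hxge_jumbo_frame_size = 9216
 *
 * or patched on a live system with mdb -kw.  The values shown only sketch
 * the syntax; pick sizes appropriate for the workload.
 */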
55 
56 static hxge_os_mutex_t hxgedebuglock;
57 static int hxge_debug_init = 0;
58 
59 /*
60  * Debugging flags:
61  *		hxge_no_tx_lb: when set, disable transmit load balancing
62  *		hxge_tx_lb_policy: 0 - TCP/UDP port (default)
63  *				   1 - From the Stack
64  *				   2 - Destination IP Address
65  */
66 uint32_t hxge_no_tx_lb = 0;
67 uint32_t hxge_tx_lb_policy = HXGE_TX_LB_TCPUDP;
68 
69 /*
70  * Tunables to manage the receive buffer blocks.
71  *
72  * hxge_rx_threshold_hi: copy all buffers.
73  * hxge_rx_buf_size_type: receive buffer block size type.
74  * hxge_rx_threshold_lo: copy only up to tunable block size type.
75  */
76 hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_NONE;
77 hxge_rxbuf_type_t hxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
78 hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_NONE;
79 
80 rtrace_t hpi_rtracebuf;
81 
82 /*
83  * Function Prototypes
84  */
85 static int hxge_attach(dev_info_t *, ddi_attach_cmd_t);
86 static int hxge_detach(dev_info_t *, ddi_detach_cmd_t);
87 static void hxge_unattach(p_hxge_t);
88 
89 static hxge_status_t hxge_setup_system_dma_pages(p_hxge_t);
90 
91 static hxge_status_t hxge_setup_mutexes(p_hxge_t);
92 static void hxge_destroy_mutexes(p_hxge_t);
93 
94 static hxge_status_t hxge_map_regs(p_hxge_t hxgep);
95 static void hxge_unmap_regs(p_hxge_t hxgep);
96 
97 hxge_status_t hxge_add_intrs(p_hxge_t hxgep);
98 static void hxge_remove_intrs(p_hxge_t hxgep);
99 static hxge_status_t hxge_add_intrs_adv(p_hxge_t hxgep);
100 static hxge_status_t hxge_add_intrs_adv_type(p_hxge_t, uint32_t);
101 static hxge_status_t hxge_add_intrs_adv_type_fix(p_hxge_t, uint32_t);
102 void hxge_intrs_enable(p_hxge_t hxgep);
103 static void hxge_intrs_disable(p_hxge_t hxgep);
104 static void hxge_suspend(p_hxge_t);
105 static hxge_status_t hxge_resume(p_hxge_t);
106 hxge_status_t hxge_setup_dev(p_hxge_t);
107 static void hxge_destroy_dev(p_hxge_t);
108 hxge_status_t hxge_alloc_mem_pool(p_hxge_t);
109 static void hxge_free_mem_pool(p_hxge_t);
110 static hxge_status_t hxge_alloc_rx_mem_pool(p_hxge_t);
111 static void hxge_free_rx_mem_pool(p_hxge_t);
112 static hxge_status_t hxge_alloc_tx_mem_pool(p_hxge_t);
113 static void hxge_free_tx_mem_pool(p_hxge_t);
114 static hxge_status_t hxge_dma_mem_alloc(p_hxge_t, dma_method_t,
115     struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
116     p_hxge_dma_common_t);
117 static void hxge_dma_mem_free(p_hxge_dma_common_t);
118 static hxge_status_t hxge_alloc_rx_buf_dma(p_hxge_t, uint16_t,
119     p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
120 static void hxge_free_rx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
121 static hxge_status_t hxge_alloc_rx_cntl_dma(p_hxge_t, uint16_t,
122     p_hxge_dma_common_t *, struct ddi_dma_attr *, size_t);
123 static void hxge_free_rx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
124 static hxge_status_t hxge_alloc_tx_buf_dma(p_hxge_t, uint16_t,
125     p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
126 static void hxge_free_tx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
127 static hxge_status_t hxge_alloc_tx_cntl_dma(p_hxge_t, uint16_t,
128     p_hxge_dma_common_t *, size_t);
129 static void hxge_free_tx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
130 static int hxge_init_common_dev(p_hxge_t);
131 static void hxge_uninit_common_dev(p_hxge_t);
132 
133 /*
134  * The next declarations are for the GLDv3 interface.
135  */
136 static int hxge_m_start(void *);
137 static void hxge_m_stop(void *);
138 static int hxge_m_multicst(void *, boolean_t, const uint8_t *);
139 static int hxge_m_promisc(void *, boolean_t);
140 static void hxge_m_ioctl(void *, queue_t *, mblk_t *);
141 static hxge_status_t hxge_mac_register(p_hxge_t hxgep);
142 
143 static boolean_t hxge_m_getcapab(void *, mac_capab_t, void *);
144 static boolean_t hxge_param_locked(mac_prop_id_t pr_num);
145 static int hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
146     uint_t pr_valsize, const void *pr_val);
147 static int hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
148     uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *);
149 static int hxge_get_def_val(hxge_t *hxgep, mac_prop_id_t pr_num,
150     uint_t pr_valsize, void *pr_val);
151 static int hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name,
152     uint_t pr_valsize, const void *pr_val);
153 static int hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name,
154     uint_t pr_flags, uint_t pr_valsize, void *pr_val);
155 static void hxge_link_poll(void *arg);
156 static void hxge_link_update(p_hxge_t hxge, link_state_t state);
157 static void hxge_msix_init(p_hxge_t hxgep);
158 static void hxge_store_msix_table(p_hxge_t hxgep);
159 static void hxge_check_1entry_msix_table(p_hxge_t hxgep, int msix_index);
160 
161 mac_priv_prop_t hxge_priv_props[] = {
162 	{"_rxdma_intr_time", MAC_PROP_PERM_RW},
163 	{"_rxdma_intr_pkts", MAC_PROP_PERM_RW},
164 	{"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW},
165 	{"_class_opt_ipv4_udp", MAC_PROP_PERM_RW},
166 	{"_class_opt_ipv4_ah", MAC_PROP_PERM_RW},
167 	{"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW},
168 	{"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW},
169 	{"_class_opt_ipv6_udp", MAC_PROP_PERM_RW},
170 	{"_class_opt_ipv6_ah", MAC_PROP_PERM_RW},
171 	{"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW}
172 };
173 
174 #define	HXGE_MAX_PRIV_PROPS	\
175 	(sizeof (hxge_priv_props)/sizeof (mac_priv_prop_t))
176 
177 #define	HXGE_MAGIC	0x4E584745UL
178 #define	MAX_DUMP_SZ 256
179 
180 #define	HXGE_M_CALLBACK_FLAGS	\
181 	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)
182 
183 extern hxge_status_t hxge_pfc_set_default_mac_addr(p_hxge_t hxgep);
184 
185 static mac_callbacks_t hxge_m_callbacks = {
186 	HXGE_M_CALLBACK_FLAGS,
187 	hxge_m_stat,
188 	hxge_m_start,
189 	hxge_m_stop,
190 	hxge_m_promisc,
191 	hxge_m_multicst,
192 	NULL,
193 	NULL,
194 	hxge_m_ioctl,
195 	hxge_m_getcapab,
196 	NULL,
197 	NULL,
198 	hxge_m_setprop,
199 	hxge_m_getprop
200 };
201 
202 /* PSARC/2007/453 MSI-X interrupt limit override. */
203 #define	HXGE_MSIX_REQUEST_10G	8
204 static int hxge_create_msi_property(p_hxge_t);
205 
206 /* Enable debug messages as necessary. */
207 uint64_t hxge_debug_level = 0;
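
/*
 * Illustrative sketch: hxge_debug_msg() emits a message only when the
 * message's level intersects hxge_debug_level (or the per-instance copy made
 * in hxge_attach()), or when the level is HXGE_NOTE or HXGE_ERR_CTL.  So,
 * for example, a line such as
 *
 *	set hxge:hxge_debug_level = <OR of the desired *_CTL bits>
 *
 * in /etc/system turns on the corresponding DDI_CTL/DMA_CTL/... output.
 * The actual bit definitions live in the hxge headers; the line above only
 * sketches the mechanism.
 */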
208 
209 /*
210  * This list contains the instance structures for the Hydra
211  * devices present in the system. The lock exists to guarantee
212  * mutually exclusive access to the list.
213  */
214 void *hxge_list = NULL;
215 void *hxge_hw_list = NULL;
216 hxge_os_mutex_t hxge_common_lock;
217 
218 extern uint64_t hpi_debug_level;
219 
220 extern hxge_status_t hxge_ldgv_init();
221 extern hxge_status_t hxge_ldgv_uninit();
222 extern hxge_status_t hxge_intr_ldgv_init();
223 extern void hxge_fm_init(p_hxge_t hxgep, ddi_device_acc_attr_t *reg_attr,
224     ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr);
225 extern void hxge_fm_fini(p_hxge_t hxgep);
226 
227 /*
228  * Count used to maintain the number of buffers being used
229  * by Hydra instances and loaned up to the upper layers.
230  */
231 uint32_t hxge_mblks_pending = 0;
232 
233 /*
234  * Device register access attributes for PIO.
235  */
236 static ddi_device_acc_attr_t hxge_dev_reg_acc_attr = {
237 	DDI_DEVICE_ATTR_V0,
238 	DDI_STRUCTURE_LE_ACC,
239 	DDI_STRICTORDER_ACC,
240 };
241 
242 /*
243  * Device descriptor access attributes for DMA.
244  */
245 static ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr = {
246 	DDI_DEVICE_ATTR_V0,
247 	DDI_STRUCTURE_LE_ACC,
248 	DDI_STRICTORDER_ACC
249 };
250 
251 /*
252  * Device buffer access attributes for DMA.
253  */
254 static ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr = {
255 	DDI_DEVICE_ATTR_V0,
256 	DDI_STRUCTURE_BE_ACC,
257 	DDI_STRICTORDER_ACC
258 };
259 
260 ddi_dma_attr_t hxge_rx_rcr_desc_dma_attr = {
261 	DMA_ATTR_V0,		/* version number. */
262 	0,			/* low address */
263 	0xffffffffffffffff,	/* high address */
264 	0xffffffffffffffff,	/* address counter max */
265 	0x80000,		/* alignment */
266 	0xfc00fc,		/* dlim_burstsizes */
267 	0x1,			/* minimum transfer size */
268 	0xffffffffffffffff,	/* maximum transfer size */
269 	0xffffffffffffffff,	/* maximum segment size */
270 	1,			/* scatter/gather list length */
271 	(unsigned int)1,	/* granularity */
272 	0			/* attribute flags */
273 };
274 
275 ddi_dma_attr_t hxge_tx_desc_dma_attr = {
276 	DMA_ATTR_V0,		/* version number. */
277 	0,			/* low address */
278 	0xffffffffffffffff,	/* high address */
279 	0xffffffffffffffff,	/* address counter max */
280 	0x100000,		/* alignment */
281 	0xfc00fc,		/* dlim_burstsizes */
282 	0x1,			/* minimum transfer size */
283 	0xffffffffffffffff,	/* maximum transfer size */
284 	0xffffffffffffffff,	/* maximum segment size */
285 	1,			/* scatter/gather list length */
286 	(unsigned int)1,	/* granularity */
287 	0			/* attribute flags */
288 };
289 
290 ddi_dma_attr_t hxge_rx_rbr_desc_dma_attr = {
291 	DMA_ATTR_V0,		/* version number. */
292 	0,			/* low address */
293 	0xffffffffffffffff,	/* high address */
294 	0xffffffffffffffff,	/* address counter max */
295 	0x40000,		/* alignment */
296 	0xfc00fc,		/* dlim_burstsizes */
297 	0x1,			/* minimum transfer size */
298 	0xffffffffffffffff,	/* maximum transfer size */
299 	0xffffffffffffffff,	/* maximum segment size */
300 	1,			/* scatter/gather list length */
301 	(unsigned int)1,	/* granularity */
302 	0			/* attribute flags */
303 };
304 
305 ddi_dma_attr_t hxge_rx_mbox_dma_attr = {
306 	DMA_ATTR_V0,		/* version number. */
307 	0,			/* low address */
308 	0xffffffffffffffff,	/* high address */
309 	0xffffffffffffffff,	/* address counter max */
310 #if defined(_BIG_ENDIAN)
311 	0x2000,			/* alignment */
312 #else
313 	0x1000,			/* alignment */
314 #endif
315 	0xfc00fc,		/* dlim_burstsizes */
316 	0x1,			/* minimum transfer size */
317 	0xffffffffffffffff,	/* maximum transfer size */
318 	0xffffffffffffffff,	/* maximum segment size */
319 	5,			/* scatter/gather list length */
320 	(unsigned int)1,	/* granularity */
321 	0			/* attribute flags */
322 };
323 
324 ddi_dma_attr_t hxge_tx_dma_attr = {
325 	DMA_ATTR_V0,		/* version number. */
326 	0,			/* low address */
327 	0xffffffffffffffff,	/* high address */
328 	0xffffffffffffffff,	/* address counter max */
329 #if defined(_BIG_ENDIAN)
330 	0x2000,			/* alignment */
331 #else
332 	0x1000,			/* alignment */
333 #endif
334 	0xfc00fc,		/* dlim_burstsizes */
335 	0x1,			/* minimum transfer size */
336 	0xffffffffffffffff,	/* maximum transfer size */
337 	0xffffffffffffffff,	/* maximum segment size */
338 	5,			/* scatter/gather list length */
339 	(unsigned int)1,	/* granularity */
340 	0			/* attribute flags */
341 };
342 
343 ddi_dma_attr_t hxge_rx_dma_attr = {
344 	DMA_ATTR_V0,		/* version number. */
345 	0,			/* low address */
346 	0xffffffffffffffff,	/* high address */
347 	0xffffffffffffffff,	/* address counter max */
348 	0x10000,		/* alignment */
349 	0xfc00fc,		/* dlim_burstsizes */
350 	0x1,			/* minimum transfer size */
351 	0xffffffffffffffff,	/* maximum transfer size */
352 	0xffffffffffffffff,	/* maximum segment size */
353 	1,			/* scatter/gather list length */
354 	(unsigned int)1,	/* granularity */
355 	DDI_DMA_RELAXED_ORDERING /* attribute flags */
356 };
357 
358 ddi_dma_lim_t hxge_dma_limits = {
359 	(uint_t)0,		/* dlim_addr_lo */
360 	(uint_t)0xffffffff,	/* dlim_addr_hi */
361 	(uint_t)0xffffffff,	/* dlim_cntr_max */
362 	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
363 	0x1,			/* dlim_minxfer */
364 	1024			/* dlim_speed */
365 };
366 
367 dma_method_t hxge_force_dma = DVMA;
368 
369 /*
370  * dma chunk sizes.
371  *
372  * Try to allocate the largest possible size
373  * so that fewer DMA chunks need to be managed.
374  */
375 size_t alloc_sizes[] = {
376     0x1000, 0x2000, 0x4000, 0x8000,
377     0x10000, 0x20000, 0x40000, 0x80000,
378     0x100000, 0x200000, 0x400000, 0x800000, 0x1000000
379 };
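
/*
 * Worked example (illustrative): hxge_alloc_rx_buf_dma() scans this table
 * for the first entry that is at least as large as the requested allocation
 * (falling back to the largest entry for very large requests) and then
 * allocates chunks of that size, stepping down to smaller entries whenever
 * an allocation fails, until the request is covered or HXGE_DMA_BLOCK chunks
 * are in use.  For instance, a 3 MB (0x300000) request selects the 0x400000
 * entry and is normally satisfied by a single 4 MB chunk; if that allocation
 * fails, two 2 MB chunks would be tried instead.
 */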
380 
381 /*
382  * hxge_attach - device attach entry point (DDI_ATTACH, DDI_RESUME, DDI_PM_RESUME).
383  */
384 static int
385 hxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
386 {
387 	p_hxge_t	hxgep = NULL;
388 	int		instance;
389 	int		status = DDI_SUCCESS;
390 	int		i;
391 
392 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_attach"));
393 
394 	/*
395 	 * Get the device instance since we'll need to set up or retrieve a soft
396 	 * state for this instance.
397 	 */
398 	instance = ddi_get_instance(dip);
399 
400 	switch (cmd) {
401 	case DDI_ATTACH:
402 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_ATTACH"));
403 		break;
404 
405 	case DDI_RESUME:
406 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_RESUME"));
407 		hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
408 		if (hxgep == NULL) {
409 			status = DDI_FAILURE;
410 			break;
411 		}
412 		if (hxgep->dip != dip) {
413 			status = DDI_FAILURE;
414 			break;
415 		}
416 		if (hxgep->suspended == DDI_PM_SUSPEND) {
417 			status = ddi_dev_is_needed(hxgep->dip, 0, 1);
418 		} else {
419 			(void) hxge_resume(hxgep);
420 		}
421 		goto hxge_attach_exit;
422 
423 	case DDI_PM_RESUME:
424 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_RESUME"));
425 		hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
426 		if (hxgep == NULL) {
427 			status = DDI_FAILURE;
428 			break;
429 		}
430 		if (hxgep->dip != dip) {
431 			status = DDI_FAILURE;
432 			break;
433 		}
434 		(void) hxge_resume(hxgep);
435 		goto hxge_attach_exit;
436 
437 	default:
438 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing unknown"));
439 		status = DDI_FAILURE;
440 		goto hxge_attach_exit;
441 	}
442 
443 	if (ddi_soft_state_zalloc(hxge_list, instance) == DDI_FAILURE) {
444 		status = DDI_FAILURE;
445 		HXGE_ERROR_MSG((hxgep, DDI_CTL,
446 		    "ddi_soft_state_zalloc failed"));
447 		goto hxge_attach_exit;
448 	}
449 
450 	hxgep = ddi_get_soft_state(hxge_list, instance);
451 	if (hxgep == NULL) {
452 		status = HXGE_ERROR;
453 		HXGE_ERROR_MSG((hxgep, DDI_CTL,
454 		    "ddi_get_soft_state failed"));
455 		goto hxge_attach_fail2;
456 	}
457 
458 	hxgep->drv_state = 0;
459 	hxgep->dip = dip;
460 	hxgep->instance = instance;
461 	hxgep->p_dip = ddi_get_parent(dip);
462 	hxgep->hxge_debug_level = hxge_debug_level;
463 	hpi_debug_level = hxge_debug_level;
464 
465 	/*
466 	 * Initialize the MMAC structure.
467 	 */
468 	(void) hxge_pfc_num_macs_get(hxgep, &hxgep->mmac.total);
469 	hxgep->mmac.available = hxgep->mmac.total;
470 	for (i = 0; i < hxgep->mmac.total; i++) {
471 		hxgep->mmac.addrs[i].set = B_FALSE;
472 		hxgep->mmac.addrs[i].primary = B_FALSE;
473 	}
474 
475 	hxge_fm_init(hxgep, &hxge_dev_reg_acc_attr, &hxge_dev_desc_dma_acc_attr,
476 	    &hxge_rx_dma_attr);
477 
478 	status = hxge_map_regs(hxgep);
479 	if (status != HXGE_OK) {
480 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_map_regs failed"));
481 		goto hxge_attach_fail3;
482 	}
483 
484 	status = hxge_init_common_dev(hxgep);
485 	if (status != HXGE_OK) {
486 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
487 		    "hxge_init_common_dev failed"));
488 		goto hxge_attach_fail4;
489 	}
490 
491 	/*
492 	 * Set up the ndd parameters for this instance.
493 	 */
494 	hxge_init_param(hxgep);
495 
496 	/*
497 	 * Set up the register tracing buffer.
498 	 */
499 	hpi_rtrace_buf_init((rtrace_t *)&hpi_rtracebuf);
500 
501 	/* init stats ptr */
502 	hxge_init_statsp(hxgep);
503 
504 	status = hxge_setup_mutexes(hxgep);
505 	if (status != HXGE_OK) {
506 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set mutex failed"));
507 		goto hxge_attach_fail;
508 	}
509 
510 	/* Scrub the MSI-X memory */
511 	hxge_msix_init(hxgep);
512 
513 	status = hxge_get_config_properties(hxgep);
514 	if (status != HXGE_OK) {
515 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "get_hw create failed"));
516 		goto hxge_attach_fail;
517 	}
518 
519 	/*
520 	 * Set up the kstats for the driver.
521 	 */
522 	hxge_setup_kstats(hxgep);
523 	hxge_setup_param(hxgep);
524 
525 	status = hxge_setup_system_dma_pages(hxgep);
526 	if (status != HXGE_OK) {
527 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "set dma page failed"));
528 		goto hxge_attach_fail;
529 	}
530 
531 	hxge_hw_id_init(hxgep);
532 	hxge_hw_init_niu_common(hxgep);
533 
534 	status = hxge_setup_dev(hxgep);
535 	if (status != DDI_SUCCESS) {
536 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set dev failed"));
537 		goto hxge_attach_fail;
538 	}
539 
540 	status = hxge_add_intrs(hxgep);
541 	if (status != DDI_SUCCESS) {
542 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "add_intr failed"));
543 		goto hxge_attach_fail;
544 	}
545 
546 	/*
547 	 * Enable interrupts.
548 	 */
549 	hxge_intrs_enable(hxgep);
550 
551 	/* Keep copy of MSIx table written */
552 	hxge_store_msix_table(hxgep);
553 
554 	if ((status = hxge_mac_register(hxgep)) != HXGE_OK) {
555 		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
556 		    "unable to register to mac layer (%d)", status));
557 		goto hxge_attach_fail;
558 	}
559 	mac_link_update(hxgep->mach, LINK_STATE_UNKNOWN);
560 
561 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "registered to mac (instance %d)",
562 	    instance));
563 
564 	goto hxge_attach_exit;
565 
566 hxge_attach_fail:
567 	hxge_unattach(hxgep);
568 	goto hxge_attach_fail1;
569 
570 hxge_attach_fail5:
571 	/*
572 	 * Tear down the ndd parameters setup.
573 	 */
574 	hxge_destroy_param(hxgep);
575 
576 	/*
577 	 * Tear down the kstat setup.
578 	 */
579 	hxge_destroy_kstats(hxgep);
580 
581 hxge_attach_fail4:
582 	if (hxgep->hxge_hw_p) {
583 		hxge_uninit_common_dev(hxgep);
584 		hxgep->hxge_hw_p = NULL;
585 	}
586 hxge_attach_fail3:
587 	/*
588 	 * Unmap the register setup.
589 	 */
590 	hxge_unmap_regs(hxgep);
591 
592 	hxge_fm_fini(hxgep);
593 
594 hxge_attach_fail2:
595 	ddi_soft_state_free(hxge_list, instance);	/* hxgep may be NULL here */
596 
597 hxge_attach_fail1:
598 	if (status != HXGE_OK)
599 		status = (HXGE_ERROR | HXGE_DDI_FAILED);
600 	hxgep = NULL;
601 
602 hxge_attach_exit:
603 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_attach status = 0x%08x",
604 	    status));
605 
606 	return (status);
607 }
608 
609 static int
610 hxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
611 {
612 	int		status = DDI_SUCCESS;
613 	int		instance;
614 	p_hxge_t	hxgep = NULL;
615 
616 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_detach"));
617 	instance = ddi_get_instance(dip);
618 	hxgep = ddi_get_soft_state(hxge_list, instance);
619 	if (hxgep == NULL) {
620 		status = DDI_FAILURE;
621 		goto hxge_detach_exit;
622 	}
623 
624 	switch (cmd) {
625 	case DDI_DETACH:
626 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_DETACH"));
627 		break;
628 
629 	case DDI_PM_SUSPEND:
630 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
631 		hxgep->suspended = DDI_PM_SUSPEND;
632 		hxge_suspend(hxgep);
633 		break;
634 
635 	case DDI_SUSPEND:
636 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_SUSPEND"));
637 		if (hxgep->suspended != DDI_PM_SUSPEND) {
638 			hxgep->suspended = DDI_SUSPEND;
639 			hxge_suspend(hxgep);
640 		}
641 		break;
642 
643 	default:
644 		status = DDI_FAILURE;
645 		break;
646 	}
647 
648 	if (cmd != DDI_DETACH)
649 		goto hxge_detach_exit;
650 
651 	/*
652 	 * Stop the xcvr polling.
653 	 */
654 	hxgep->suspended = cmd;
655 
656 	if (hxgep->mach && (status = mac_unregister(hxgep->mach)) != 0) {
657 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
658 		    "<== hxge_detach status = 0x%08X", status));
659 		return (DDI_FAILURE);
660 	}
661 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
662 	    "<== hxge_detach (mac_unregister) status = 0x%08X", status));
663 
664 	hxge_unattach(hxgep);
665 	hxgep = NULL;
666 
667 hxge_detach_exit:
668 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_detach status = 0x%08X",
669 	    status));
670 
671 	return (status);
672 }
673 
674 static void
675 hxge_unattach(p_hxge_t hxgep)
676 {
677 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unattach"));
678 
679 	if (hxgep == NULL || hxgep->dev_regs == NULL) {
680 		return;
681 	}
682 
683 	if (hxgep->hxge_hw_p) {
684 		hxge_uninit_common_dev(hxgep);
685 		hxgep->hxge_hw_p = NULL;
686 	}
687 
688 	if (hxgep->hxge_timerid) {
689 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
690 		hxgep->hxge_timerid = 0;
691 	}
692 
693 	/* Stop any further interrupts. */
694 	hxge_remove_intrs(hxgep);
695 
696 	/* Stop the device and free resources. */
697 	hxge_destroy_dev(hxgep);
698 
699 	/* Tear down the ndd parameters setup. */
700 	hxge_destroy_param(hxgep);
701 
702 	/* Tear down the kstat setup. */
703 	hxge_destroy_kstats(hxgep);
704 
705 	/*
706 	 * Remove the list of ndd parameters which were setup during attach.
707 	 */
708 	if (hxgep->dip) {
709 		HXGE_DEBUG_MSG((hxgep, OBP_CTL,
710 		    " hxge_unattach: remove all properties"));
711 		(void) ddi_prop_remove_all(hxgep->dip);
712 	}
713 
714 	/*
715 	 * Reset RDC, TDC, PFC, and VMAC blocks from PEU to clear any
716 	 * previous state before unmapping the registers.
717 	 */
718 	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, 0x0000001E);
719 	HXGE_DELAY(1000);
720 
721 	/*
722 	 * Unmap the register setup.
723 	 */
724 	hxge_unmap_regs(hxgep);
725 
726 	hxge_fm_fini(hxgep);
727 
728 	/* Destroy all mutexes.  */
729 	hxge_destroy_mutexes(hxgep);
730 
731 	/*
732 	 * Free the soft state data structures allocated with this instance.
733 	 */
734 	ddi_soft_state_free(hxge_list, hxgep->instance);
735 
736 	HXGE_DEBUG_MSG((NULL, DDI_CTL, "<== hxge_unattach"));
737 }
738 
739 static hxge_status_t
740 hxge_map_regs(p_hxge_t hxgep)
741 {
742 	int		ddi_status = DDI_SUCCESS;
743 	p_dev_regs_t	dev_regs;
744 
745 #ifdef	HXGE_DEBUG
746 	char		*sysname;
747 #endif
748 
749 	off_t		regsize;
750 	hxge_status_t	status = HXGE_OK;
751 	int		nregs;
752 
753 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_map_regs"));
754 
755 	if (ddi_dev_nregs(hxgep->dip, &nregs) != DDI_SUCCESS)
756 		return (HXGE_ERROR);
757 
758 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_regs: nregs: %d", nregs));
759 
760 	hxgep->dev_regs = NULL;
761 	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
762 	dev_regs->hxge_regh = NULL;
763 	dev_regs->hxge_pciregh = NULL;
764 	dev_regs->hxge_msix_regh = NULL;
765 
766 	(void) ddi_dev_regsize(hxgep->dip, 0, &regsize);
767 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
768 	    "hxge_map_regs: pci config size 0x%x", regsize));
769 
770 	ddi_status = ddi_regs_map_setup(hxgep->dip, 0,
771 	    (caddr_t *)&(dev_regs->hxge_pciregp), 0, 0,
772 	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_pciregh);
773 	if (ddi_status != DDI_SUCCESS) {
774 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
775 		    "ddi_map_regs, hxge bus config regs failed"));
776 		goto hxge_map_regs_fail0;
777 	}
778 
779 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
780 	    "hxge_map_reg: PCI config addr 0x%0llx handle 0x%0llx",
781 	    dev_regs->hxge_pciregp,
782 	    dev_regs->hxge_pciregh));
783 
784 	(void) ddi_dev_regsize(hxgep->dip, 1, &regsize);
785 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
786 	    "hxge_map_regs: pio size 0x%x", regsize));
787 
788 	/* set up the device mapped register */
789 	ddi_status = ddi_regs_map_setup(hxgep->dip, 1,
790 	    (caddr_t *)&(dev_regs->hxge_regp), 0, 0,
791 	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_regh);
792 
793 	if (ddi_status != DDI_SUCCESS) {
794 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
795 		    "ddi_map_regs for Hydra global reg failed"));
796 		goto hxge_map_regs_fail1;
797 	}
798 
799 	/* set up the msi/msi-x mapped register */
800 	(void) ddi_dev_regsize(hxgep->dip, 2, &regsize);
801 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
802 	    "hxge_map_regs: msix size 0x%x", regsize));
803 
804 	ddi_status = ddi_regs_map_setup(hxgep->dip, 2,
805 	    (caddr_t *)&(dev_regs->hxge_msix_regp), 0, 0,
806 	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_msix_regh);
807 
808 	if (ddi_status != DDI_SUCCESS) {
809 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
810 		    "ddi_map_regs for msi reg failed"));
811 		goto hxge_map_regs_fail2;
812 	}
813 
814 	hxgep->dev_regs = dev_regs;
815 
816 	HPI_PCI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_pciregh);
817 	HPI_PCI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_pciregp);
818 	HPI_MSI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_msix_regh);
819 	HPI_MSI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_msix_regp);
820 
821 	HPI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
822 	HPI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
823 
824 	HPI_REG_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
825 	HPI_REG_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
826 
827 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_reg: hardware addr 0x%0llx "
828 	    " handle 0x%0llx", dev_regs->hxge_regp, dev_regs->hxge_regh));
829 
830 	goto hxge_map_regs_exit;
831 
832 hxge_map_regs_fail3:
833 	if (dev_regs->hxge_msix_regh) {
834 		ddi_regs_map_free(&dev_regs->hxge_msix_regh);
835 	}
836 
837 hxge_map_regs_fail2:
838 	if (dev_regs->hxge_regh) {
839 		ddi_regs_map_free(&dev_regs->hxge_regh);
840 	}
841 
842 hxge_map_regs_fail1:
843 	if (dev_regs->hxge_pciregh) {
844 		ddi_regs_map_free(&dev_regs->hxge_pciregh);
845 	}
846 
847 hxge_map_regs_fail0:
848 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "Freeing register set memory"));
849 	kmem_free(dev_regs, sizeof (dev_regs_t));
850 
851 hxge_map_regs_exit:
852 	if (ddi_status != DDI_SUCCESS)
853 		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
854 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_map_regs"));
855 	return (status);
856 }
857 
858 static void
859 hxge_unmap_regs(p_hxge_t hxgep)
860 {
861 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unmap_regs"));
862 	if (hxgep->dev_regs) {
863 		if (hxgep->dev_regs->hxge_pciregh) {
864 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
865 			    "==> hxge_unmap_regs: bus"));
866 			ddi_regs_map_free(&hxgep->dev_regs->hxge_pciregh);
867 			hxgep->dev_regs->hxge_pciregh = NULL;
868 		}
869 
870 		if (hxgep->dev_regs->hxge_regh) {
871 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
872 			    "==> hxge_unmap_regs: device registers"));
873 			ddi_regs_map_free(&hxgep->dev_regs->hxge_regh);
874 			hxgep->dev_regs->hxge_regh = NULL;
875 		}
876 
877 		if (hxgep->dev_regs->hxge_msix_regh) {
878 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
879 			    "==> hxge_unmap_regs: device interrupts"));
880 			ddi_regs_map_free(&hxgep->dev_regs->hxge_msix_regh);
881 			hxgep->dev_regs->hxge_msix_regh = NULL;
882 		}
883 		kmem_free(hxgep->dev_regs, sizeof (dev_regs_t));
884 		hxgep->dev_regs = NULL;
885 	}
886 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_unmap_regs"));
887 }
888 
889 static hxge_status_t
890 hxge_setup_mutexes(p_hxge_t hxgep)
891 {
892 	int		ddi_status = DDI_SUCCESS;
893 	hxge_status_t	status = HXGE_OK;
894 
895 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_mutexes"));
896 
897 	/*
898 	 * Get the interrupt cookie so the mutexes can be initialized.
899 	 */
900 	ddi_status = ddi_get_iblock_cookie(hxgep->dip, 0,
901 	    &hxgep->interrupt_cookie);
902 
903 	if (ddi_status != DDI_SUCCESS) {
904 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
905 		    "<== hxge_setup_mutexes: failed 0x%x", ddi_status));
906 		goto hxge_setup_mutexes_exit;
907 	}
908 
909 	/*
910 	 * Initialize mutexes for this device.
911 	 */
912 	MUTEX_INIT(hxgep->genlock, NULL,
913 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
914 	MUTEX_INIT(&hxgep->ouraddr_lock, NULL,
915 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
916 	RW_INIT(&hxgep->filter_lock, NULL,
917 	    RW_DRIVER, (void *) hxgep->interrupt_cookie);
918 	MUTEX_INIT(&hxgep->pio_lock, NULL,
919 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
920 	MUTEX_INIT(&hxgep->timeout.lock, NULL,
921 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
922 
923 hxge_setup_mutexes_exit:
924 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
925 	    "<== hxge_setup_mutexes status = %x", status));
926 
927 	if (ddi_status != DDI_SUCCESS)
928 		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
929 
930 	return (status);
931 }
932 
933 static void
934 hxge_destroy_mutexes(p_hxge_t hxgep)
935 {
936 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_mutexes"));
937 	RW_DESTROY(&hxgep->filter_lock);
938 	MUTEX_DESTROY(&hxgep->ouraddr_lock);
939 	MUTEX_DESTROY(hxgep->genlock);
940 	MUTEX_DESTROY(&hxgep->pio_lock);
941 	MUTEX_DESTROY(&hxgep->timeout.lock);
942 
943 	if (hxge_debug_init == 1) {
944 		MUTEX_DESTROY(&hxgedebuglock);
945 		hxge_debug_init = 0;
946 	}
947 
948 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_mutexes"));
949 }
950 
951 hxge_status_t
952 hxge_init(p_hxge_t hxgep)
953 {
954 	hxge_status_t status = HXGE_OK;
955 
956 	HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_init"));
957 
958 	if (hxgep->drv_state & STATE_HW_INITIALIZED) {
959 		return (status);
960 	}
961 
962 	/*
963 	 * Allocate system memory for the receive/transmit buffer blocks and
964 	 * receive/transmit descriptor rings.
965 	 */
966 	status = hxge_alloc_mem_pool(hxgep);
967 	if (status != HXGE_OK) {
968 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "alloc mem failed\n"));
969 		goto hxge_init_fail1;
970 	}
971 
972 	/*
973 	 * Initialize and enable TXDMA channels.
974 	 */
975 	status = hxge_init_txdma_channels(hxgep);
976 	if (status != HXGE_OK) {
977 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init txdma failed\n"));
978 		goto hxge_init_fail3;
979 	}
980 
981 	/*
982 	 * Initialize and enable RXDMA channels.
983 	 */
984 	status = hxge_init_rxdma_channels(hxgep);
985 	if (status != HXGE_OK) {
986 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init rxdma failed\n"));
987 		goto hxge_init_fail4;
988 	}
989 
990 	/*
991 	 * Initialize TCAM
992 	 */
993 	status = hxge_classify_init(hxgep);
994 	if (status != HXGE_OK) {
995 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init classify failed\n"));
996 		goto hxge_init_fail5;
997 	}
998 
999 	/*
1000 	 * Initialize the VMAC block.
1001 	 */
1002 	status = hxge_vmac_init(hxgep);
1003 	if (status != HXGE_OK) {
1004 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init MAC failed\n"));
1005 		goto hxge_init_fail5;
1006 	}
1007 
1008 	/* Bringup - this may be unnecessary when PXE and FCODE available */
1009 	status = hxge_pfc_set_default_mac_addr(hxgep);
1010 	if (status != HXGE_OK) {
1011 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1012 		    "Default Address Failure\n"));
1013 		goto hxge_init_fail5;
1014 	}
1015 
1016 	hxge_intrs_enable(hxgep);
1017 
1018 	/* Keep copy of MSIx table written */
1019 	hxge_store_msix_table(hxgep);
1020 
1021 	/*
1022 	 * Enable hardware interrupts.
1023 	 */
1024 	hxge_intr_hw_enable(hxgep);
1025 	hxgep->drv_state |= STATE_HW_INITIALIZED;
1026 
1027 	goto hxge_init_exit;
1028 
1029 hxge_init_fail5:
1030 	hxge_uninit_rxdma_channels(hxgep);
1031 hxge_init_fail4:
1032 	hxge_uninit_txdma_channels(hxgep);
1033 hxge_init_fail3:
1034 	hxge_free_mem_pool(hxgep);
1035 hxge_init_fail1:
1036 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1037 	    "<== hxge_init status (failed) = 0x%08x", status));
1038 	return (status);
1039 
1040 hxge_init_exit:
1041 
1042 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_init status = 0x%08x",
1043 	    status));
1044 
1045 	return (status);
1046 }
1047 
1048 timeout_id_t
1049 hxge_start_timer(p_hxge_t hxgep, fptrv_t func, int msec)
1050 {
1051 	if ((hxgep->suspended == 0) || (hxgep->suspended == DDI_RESUME)) {
1052 		return (timeout(func, (caddr_t)hxgep,
1053 		    drv_usectohz(1000 * msec)));
1054 	}
1055 	return (NULL);
1056 }
1057 
1058 /*ARGSUSED*/
1059 void
1060 hxge_stop_timer(p_hxge_t hxgep, timeout_id_t timerid)
1061 {
1062 	if (timerid) {
1063 		(void) untimeout(timerid);
1064 	}
1065 }
1066 
1067 void
1068 hxge_uninit(p_hxge_t hxgep)
1069 {
1070 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_uninit"));
1071 
1072 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
1073 		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1074 		    "==> hxge_uninit: not initialized"));
1075 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
1076 		return;
1077 	}
1078 
1079 	/* Stop timer */
1080 	if (hxgep->hxge_timerid) {
1081 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
1082 		hxgep->hxge_timerid = 0;
1083 	}
1084 
1085 	(void) hxge_intr_hw_disable(hxgep);
1086 
1087 	/* Reset the receive VMAC side.  */
1088 	(void) hxge_rx_vmac_disable(hxgep);
1089 
1090 	/* Free classification resources */
1091 	(void) hxge_classify_uninit(hxgep);
1092 
1093 	/* Reset the transmit/receive DMA side.  */
1094 	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
1095 	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);
1096 
1097 	hxge_uninit_txdma_channels(hxgep);
1098 	hxge_uninit_rxdma_channels(hxgep);
1099 
1100 	/* Reset the transmit VMAC side.  */
1101 	(void) hxge_tx_vmac_disable(hxgep);
1102 
1103 	hxge_free_mem_pool(hxgep);
1104 
1105 	hxgep->drv_state &= ~STATE_HW_INITIALIZED;
1106 
1107 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
1108 }
1109 
1110 /*ARGSUSED*/
1111 /*VARARGS*/
1112 void
1113 hxge_debug_msg(p_hxge_t hxgep, uint64_t level, char *fmt, ...)
1114 {
1115 	char		msg_buffer[1048];
1116 	char		prefix_buffer[32];
1117 	int		instance;
1118 	uint64_t	debug_level;
1119 	int		cmn_level = CE_CONT;
1120 	va_list		ap;
1121 
1122 	debug_level = (hxgep == NULL) ? hxge_debug_level :
1123 	    hxgep->hxge_debug_level;
1124 
1125 	if ((level & debug_level) || (level == HXGE_NOTE) ||
1126 	    (level == HXGE_ERR_CTL)) {
1127 		/* do the msg processing */
1128 		if (hxge_debug_init == 0) {
1129 			MUTEX_INIT(&hxgedebuglock, NULL, MUTEX_DRIVER, NULL);
1130 			hxge_debug_init = 1;
1131 		}
1132 
1133 		MUTEX_ENTER(&hxgedebuglock);
1134 
1135 		if ((level & HXGE_NOTE)) {
1136 			cmn_level = CE_NOTE;
1137 		}
1138 
1139 		if (level & HXGE_ERR_CTL) {
1140 			cmn_level = CE_WARN;
1141 		}
1142 
1143 		va_start(ap, fmt);
1144 		(void) vsprintf(msg_buffer, fmt, ap);
1145 		va_end(ap);
1146 
1147 		if (hxgep == NULL) {
1148 			instance = -1;
1149 			(void) sprintf(prefix_buffer, "%s :", "hxge");
1150 		} else {
1151 			instance = hxgep->instance;
1152 			(void) sprintf(prefix_buffer,
1153 			    "%s%d :", "hxge", instance);
1154 		}
1155 
1156 		MUTEX_EXIT(&hxgedebuglock);
1157 		cmn_err(cmn_level, "%s %s\n", prefix_buffer, msg_buffer);
1158 	}
1159 }
1160 
1161 char *
1162 hxge_dump_packet(char *addr, int size)
1163 {
1164 	uchar_t		*ap = (uchar_t *)addr;
1165 	int		i;
1166 	static char	etherbuf[1024];
1167 	char		*cp = etherbuf;
1168 	char		digits[] = "0123456789abcdef";
1169 
1170 	if (!size)
1171 		size = 60;
1172 
1173 	if (size > MAX_DUMP_SZ) {
1174 		/* Dump the leading bytes */
1175 		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1176 			if (*ap > 0x0f)
1177 				*cp++ = digits[*ap >> 4];
1178 			*cp++ = digits[*ap++ & 0xf];
1179 			*cp++ = ':';
1180 		}
1181 		for (i = 0; i < 20; i++)
1182 			*cp++ = '.';
1183 		/* Dump the last MAX_DUMP_SZ/2 bytes */
1184 		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
1185 		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1186 			if (*ap > 0x0f)
1187 				*cp++ = digits[*ap >> 4];
1188 			*cp++ = digits[*ap++ & 0xf];
1189 			*cp++ = ':';
1190 		}
1191 	} else {
1192 		for (i = 0; i < size; i++) {
1193 			if (*ap > 0x0f)
1194 				*cp++ = digits[*ap >> 4];
1195 			*cp++ = digits[*ap++ & 0xf];
1196 			*cp++ = ':';
1197 		}
1198 	}
1199 	*--cp = 0;
1200 	return (etherbuf);
1201 }
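
/*
 * Usage sketch (illustrative; TX_CTL and MBLKL are assumed here, not taken
 * from this file): a caller holding an mblk could log its contents with
 *
 *	HXGE_DEBUG_MSG((hxgep, TX_CTL, "pkt: %s",
 *	    hxge_dump_packet((char *)mp->b_rptr, MBLKL(mp))));
 *
 * Note that the returned string points at a static buffer, so concurrent
 * callers would overwrite each other's output.
 */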
1202 
1203 static void
1204 hxge_suspend(p_hxge_t hxgep)
1205 {
1206 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_suspend"));
1207 
1208 	/*
1209 	 * Stop the link status timer before hxge_intrs_disable() to avoid
1210 	 * accessing the MSIX table simultaneously. Note that the timer
1211 	 * routine polls for MSIX parity errors.
1212 	 */
1213 	MUTEX_ENTER(&hxgep->timeout.lock);
1214 	if (hxgep->timeout.id)
1215 		(void) untimeout(hxgep->timeout.id);
1216 	MUTEX_EXIT(&hxgep->timeout.lock);
1217 
1218 	hxge_intrs_disable(hxgep);
1219 	hxge_destroy_dev(hxgep);
1220 
1221 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_suspend"));
1222 }
1223 
1224 static hxge_status_t
1225 hxge_resume(p_hxge_t hxgep)
1226 {
1227 	hxge_status_t status = HXGE_OK;
1228 
1229 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_resume"));
1230 	hxgep->suspended = DDI_RESUME;
1231 
1232 	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_START);
1233 	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_START);
1234 
1235 	(void) hxge_rx_vmac_enable(hxgep);
1236 	(void) hxge_tx_vmac_enable(hxgep);
1237 
1238 	hxge_intrs_enable(hxgep);
1239 
1240 	/* Keep copy of MSIx table written */
1241 	hxge_store_msix_table(hxgep);
1242 
1243 	hxgep->suspended = 0;
1244 
1245 	/*
1246 	 * Resume the link status timer after hxge_intrs_enable to avoid
1247 	 * accessing the MSIX table simultaneously.
1248 	 */
1249 	MUTEX_ENTER(&hxgep->timeout.lock);
1250 	hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
1251 	    hxgep->timeout.ticks);
1252 	MUTEX_EXIT(&hxgep->timeout.lock);
1253 
1254 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1255 	    "<== hxge_resume status = 0x%x", status));
1256 
1257 	return (status);
1258 }
1259 
1260 hxge_status_t
1261 hxge_setup_dev(p_hxge_t hxgep)
1262 {
1263 	hxge_status_t status = HXGE_OK;
1264 
1265 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_dev"));
1266 
1267 	status = hxge_link_init(hxgep);
1268 	if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) {
1269 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1270 		    "Bad register acc handle"));
1271 		status = HXGE_ERROR;
1272 	}
1273 
1274 	if (status != HXGE_OK) {
1275 		HXGE_DEBUG_MSG((hxgep, MAC_CTL,
1276 		    " hxge_setup_dev status (link init 0x%08x)", status));
1277 		goto hxge_setup_dev_exit;
1278 	}
1279 
1280 hxge_setup_dev_exit:
1281 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1282 	    "<== hxge_setup_dev status = 0x%08x", status));
1283 
1284 	return (status);
1285 }
1286 
1287 static void
1288 hxge_destroy_dev(p_hxge_t hxgep)
1289 {
1290 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_dev"));
1291 
1292 	(void) hxge_hw_stop(hxgep);
1293 
1294 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_dev"));
1295 }
1296 
1297 static hxge_status_t
1298 hxge_setup_system_dma_pages(p_hxge_t hxgep)
1299 {
1300 	int			ddi_status = DDI_SUCCESS;
1301 	uint_t			count;
1302 	ddi_dma_cookie_t	cookie;
1303 	uint_t			iommu_pagesize;
1304 	hxge_status_t		status = HXGE_OK;
1305 
1306 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_system_dma_pages"));
1307 
1308 	hxgep->sys_page_sz = ddi_ptob(hxgep->dip, (ulong_t)1);
1309 	iommu_pagesize = dvma_pagesize(hxgep->dip);
1310 
1311 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1312 	    " hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1313 	    " default_block_size %d iommu_pagesize %d",
1314 	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1315 	    hxgep->rx_default_block_size, iommu_pagesize));
1316 
1317 	if (iommu_pagesize != 0) {
1318 		if (hxgep->sys_page_sz == iommu_pagesize) {
1319 			/* Hydra supports pages of up to 8K */
1320 			if (iommu_pagesize > 0x2000)
1321 				hxgep->sys_page_sz = 0x2000;
1322 		} else {
1323 			if (hxgep->sys_page_sz > iommu_pagesize)
1324 				hxgep->sys_page_sz = iommu_pagesize;
1325 		}
1326 	}
1327 
1328 	hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1329 
1330 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1331 	    "==> hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1332 	    "default_block_size %d page mask %d",
1333 	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1334 	    hxgep->rx_default_block_size, hxgep->sys_page_mask));
1335 
1336 	switch (hxgep->sys_page_sz) {
1337 	default:
1338 		hxgep->sys_page_sz = 0x1000;
1339 		hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1340 		hxgep->rx_default_block_size = 0x1000;
1341 		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1342 		break;
1343 	case 0x1000:
1344 		hxgep->rx_default_block_size = 0x1000;
1345 		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1346 		break;
1347 	case 0x2000:
1348 		hxgep->rx_default_block_size = 0x2000;
1349 		hxgep->rx_bksize_code = RBR_BKSIZE_8K;
1350 		break;
1351 	}
1352 
1353 	hxge_rx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1354 	hxge_tx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1355 
1356 	/*
1357 	 * Get the system DMA burst size.
1358 	 */
1359 	ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr,
1360 	    DDI_DMA_DONTWAIT, 0, &hxgep->dmasparehandle);
1361 	if (ddi_status != DDI_SUCCESS) {
1362 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1363 		    "ddi_dma_alloc_handle: failed status 0x%x", ddi_status));
1364 		goto hxge_get_soft_properties_exit;
1365 	}
1366 
1367 	ddi_status = ddi_dma_addr_bind_handle(hxgep->dmasparehandle, NULL,
1368 	    (caddr_t)hxgep->dmasparehandle, sizeof (hxgep->dmasparehandle),
1369 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
1370 	    &cookie, &count);
1371 	if (ddi_status != DDI_DMA_MAPPED) {
1372 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1373 		    "Binding spare handle to find system burstsize failed."));
1374 		ddi_status = DDI_FAILURE;
1375 		goto hxge_get_soft_properties_fail1;
1376 	}
1377 
1378 	hxgep->sys_burst_sz = ddi_dma_burstsizes(hxgep->dmasparehandle);
1379 	(void) ddi_dma_unbind_handle(hxgep->dmasparehandle);
1380 
1381 hxge_get_soft_properties_fail1:
1382 	ddi_dma_free_handle(&hxgep->dmasparehandle);
1383 
1384 hxge_get_soft_properties_exit:
1385 
1386 	if (ddi_status != DDI_SUCCESS)
1387 		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
1388 
1389 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1390 	    "<== hxge_setup_system_dma_pages status = 0x%08x", status));
1391 
1392 	return (status);
1393 }
1394 
1395 hxge_status_t
1396 hxge_alloc_mem_pool(p_hxge_t hxgep)
1397 {
1398 	hxge_status_t status = HXGE_OK;
1399 
1400 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_alloc_mem_pool"));
1401 
1402 	status = hxge_alloc_rx_mem_pool(hxgep);
1403 	if (status != HXGE_OK) {
1404 		return (HXGE_ERROR);
1405 	}
1406 
1407 	status = hxge_alloc_tx_mem_pool(hxgep);
1408 	if (status != HXGE_OK) {
1409 		hxge_free_rx_mem_pool(hxgep);
1410 		return (HXGE_ERROR);
1411 	}
1412 
1413 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_alloc_mem_pool"));
1414 	return (HXGE_OK);
1415 }
1416 
1417 static void
1418 hxge_free_mem_pool(p_hxge_t hxgep)
1419 {
1420 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_mem_pool"));
1421 
1422 	hxge_free_rx_mem_pool(hxgep);
1423 	hxge_free_tx_mem_pool(hxgep);
1424 
1425 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_mem_pool"));
1426 }
1427 
1428 static hxge_status_t
1429 hxge_alloc_rx_mem_pool(p_hxge_t hxgep)
1430 {
1431 	int			i, j;
1432 	uint32_t		ndmas, st_rdc;
1433 	p_hxge_dma_pt_cfg_t	p_all_cfgp;
1434 	p_hxge_hw_pt_cfg_t	p_cfgp;
1435 	p_hxge_dma_pool_t	dma_poolp;
1436 	p_hxge_dma_common_t	*dma_buf_p;
1437 	p_hxge_dma_pool_t	dma_rbr_cntl_poolp;
1438 	p_hxge_dma_common_t	*dma_rbr_cntl_p;
1439 	p_hxge_dma_pool_t	dma_rcr_cntl_poolp;
1440 	p_hxge_dma_common_t	*dma_rcr_cntl_p;
1441 	p_hxge_dma_pool_t	dma_mbox_cntl_poolp;
1442 	p_hxge_dma_common_t	*dma_mbox_cntl_p;
1443 	size_t			rx_buf_alloc_size;
1444 	size_t			rx_rbr_cntl_alloc_size;
1445 	size_t			rx_rcr_cntl_alloc_size;
1446 	size_t			rx_mbox_cntl_alloc_size;
1447 	uint32_t		*num_chunks;	/* per dma */
1448 	hxge_status_t		status = HXGE_OK;
1449 
1450 	uint32_t		hxge_port_rbr_size;
1451 	uint32_t		hxge_port_rbr_spare_size;
1452 	uint32_t		hxge_port_rcr_size;
1453 
1454 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_mem_pool"));
1455 
1456 	p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1457 	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1458 	st_rdc = p_cfgp->start_rdc;
1459 	ndmas = p_cfgp->max_rdcs;
1460 
1461 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1462 	    " hxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));
1463 
1464 	/*
1465 	 * Allocate memory for each receive DMA channel.
1466 	 */
1467 	dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1468 	    KM_SLEEP);
1469 	dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1470 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1471 
1472 	dma_rbr_cntl_poolp = (p_hxge_dma_pool_t)
1473 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1474 	dma_rbr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1475 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1476 	dma_rcr_cntl_poolp = (p_hxge_dma_pool_t)
1477 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1478 	dma_rcr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1479 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1480 	dma_mbox_cntl_poolp = (p_hxge_dma_pool_t)
1481 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1482 	dma_mbox_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1483 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1484 
1485 	num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
1486 	    KM_SLEEP);
1487 
1488 	/*
1489 	 * Assume that each DMA channel will be configured with default block
1490 	 * size. The rbr block count is rounded up to a multiple of the batch count (16).
1491 	 */
1492 	hxge_port_rbr_size = p_all_cfgp->rbr_size;
1493 	hxge_port_rcr_size = p_all_cfgp->rcr_size;
1494 
1495 	if (!hxge_port_rbr_size) {
1496 		hxge_port_rbr_size = HXGE_RBR_RBB_DEFAULT;
1497 	}
1498 
1499 	if (hxge_port_rbr_size % HXGE_RXDMA_POST_BATCH) {
1500 		hxge_port_rbr_size = (HXGE_RXDMA_POST_BATCH *
1501 		    (hxge_port_rbr_size / HXGE_RXDMA_POST_BATCH + 1));
1502 	}
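
	/*
	 * Example (illustrative): with a batch count of 16, a configured
	 * rbr size of 1000 is rounded up by the check above to 1008 (16 * 63).
	 */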
1503 
1504 	p_all_cfgp->rbr_size = hxge_port_rbr_size;
1505 	hxge_port_rbr_spare_size = hxge_rbr_spare_size;
1506 
1507 	if (hxge_port_rbr_spare_size % HXGE_RXDMA_POST_BATCH) {
1508 		hxge_port_rbr_spare_size = (HXGE_RXDMA_POST_BATCH *
1509 		    (hxge_port_rbr_spare_size / HXGE_RXDMA_POST_BATCH + 1));
1510 	}
1511 
1512 	rx_buf_alloc_size = (hxgep->rx_default_block_size *
1513 	    (hxge_port_rbr_size + hxge_port_rbr_spare_size));
1514 
1515 	/*
1516 	 * Addresses of receive block ring, receive completion ring and the
1517 	 * mailbox must all be cache-aligned (64 bytes).
1518 	 */
1519 	rx_rbr_cntl_alloc_size = hxge_port_rbr_size + hxge_port_rbr_spare_size;
1520 	rx_rbr_cntl_alloc_size *= sizeof (rx_desc_t);
1521 	rx_rcr_cntl_alloc_size = sizeof (rcr_entry_t) * hxge_port_rcr_size;
1522 	rx_mbox_cntl_alloc_size = sizeof (rxdma_mailbox_t);
1523 
1524 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_alloc_rx_mem_pool: "
1525 	    "hxge_port_rbr_size = %d hxge_port_rbr_spare_size = %d "
1526 	    "hxge_port_rcr_size = %d rx_rbr_cntl_alloc_size = %d",
1527 	    hxge_port_rbr_size, hxge_port_rbr_spare_size,
1528 	    hxge_port_rcr_size, rx_rbr_cntl_alloc_size));
1529 
1530 	hxgep->hxge_port_rbr_size = hxge_port_rbr_size;
1531 	hxgep->hxge_port_rcr_size = hxge_port_rcr_size;
1532 
1533 	/*
1534 	 * Allocate memory for receive buffers and descriptor rings. Replace
1535 	 * allocation functions with interface functions provided by the
1536 	 * partition manager when it is available.
1537 	 */
1538 	/*
1539 	 * Allocate memory for the receive buffer blocks.
1540 	 */
1541 	for (i = 0; i < ndmas; i++) {
1542 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1543 		    " hxge_alloc_rx_mem_pool to alloc mem: "
1544 		    " dma %d dma_buf_p %llx &dma_buf_p %llx",
1545 		    i, dma_buf_p[i], &dma_buf_p[i]));
1546 
1547 		num_chunks[i] = 0;
1548 
1549 		status = hxge_alloc_rx_buf_dma(hxgep, st_rdc, &dma_buf_p[i],
1550 		    rx_buf_alloc_size, hxgep->rx_default_block_size,
1551 		    &num_chunks[i]);
1552 		if (status != HXGE_OK) {
1553 			break;
1554 		}
1555 
1556 		st_rdc++;
1557 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1558 		    " hxge_alloc_rx_mem_pool DONE  alloc mem: "
1559 		    "dma %d dma_buf_p %llx &dma_buf_p %llx", i,
1560 		    dma_buf_p[i], &dma_buf_p[i]));
1561 	}
1562 
1563 	if (i < ndmas) {
1564 		goto hxge_alloc_rx_mem_fail1;
1565 	}
1566 
1567 	/*
1568 	 * Allocate memory for descriptor rings and mailbox.
1569 	 */
1570 	st_rdc = p_cfgp->start_rdc;
1571 	for (j = 0; j < ndmas; j++) {
1572 		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1573 		    &dma_rbr_cntl_p[j], &hxge_rx_rbr_desc_dma_attr,
1574 		    rx_rbr_cntl_alloc_size)) != HXGE_OK) {
1575 			break;
1576 		}
1577 
1578 		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1579 		    &dma_rcr_cntl_p[j], &hxge_rx_rcr_desc_dma_attr,
1580 		    rx_rcr_cntl_alloc_size)) != HXGE_OK) {
1581 			break;
1582 		}
1583 
1584 		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1585 		    &dma_mbox_cntl_p[j], &hxge_rx_mbox_dma_attr,
1586 		    rx_mbox_cntl_alloc_size)) != HXGE_OK) {
1587 			break;
1588 		}
1589 		st_rdc++;
1590 	}
1591 
1592 	if (j < ndmas) {
1593 		goto hxge_alloc_rx_mem_fail2;
1594 	}
1595 
1596 	dma_poolp->ndmas = ndmas;
1597 	dma_poolp->num_chunks = num_chunks;
1598 	dma_poolp->buf_allocated = B_TRUE;
1599 	hxgep->rx_buf_pool_p = dma_poolp;
1600 	dma_poolp->dma_buf_pool_p = dma_buf_p;
1601 
1602 	dma_rbr_cntl_poolp->ndmas = ndmas;
1603 	dma_rbr_cntl_poolp->buf_allocated = B_TRUE;
1604 	hxgep->rx_rbr_cntl_pool_p = dma_rbr_cntl_poolp;
1605 	dma_rbr_cntl_poolp->dma_buf_pool_p = dma_rbr_cntl_p;
1606 
1607 	dma_rcr_cntl_poolp->ndmas = ndmas;
1608 	dma_rcr_cntl_poolp->buf_allocated = B_TRUE;
1609 	hxgep->rx_rcr_cntl_pool_p = dma_rcr_cntl_poolp;
1610 	dma_rcr_cntl_poolp->dma_buf_pool_p = dma_rcr_cntl_p;
1611 
1612 	dma_mbox_cntl_poolp->ndmas = ndmas;
1613 	dma_mbox_cntl_poolp->buf_allocated = B_TRUE;
1614 	hxgep->rx_mbox_cntl_pool_p = dma_mbox_cntl_poolp;
1615 	dma_mbox_cntl_poolp->dma_buf_pool_p = dma_mbox_cntl_p;
1616 
1617 	goto hxge_alloc_rx_mem_pool_exit;
1618 
1619 hxge_alloc_rx_mem_fail2:
1620 	/* Free control buffers */
1621 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1622 	    "==> hxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
1623 	for (; j >= 0; j--) {
1624 		hxge_free_rx_cntl_dma(hxgep,
1625 		    (p_hxge_dma_common_t)dma_rbr_cntl_p[j]);
1626 		hxge_free_rx_cntl_dma(hxgep,
1627 		    (p_hxge_dma_common_t)dma_rcr_cntl_p[j]);
1628 		hxge_free_rx_cntl_dma(hxgep,
1629 		    (p_hxge_dma_common_t)dma_mbox_cntl_p[j]);
1630 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1631 		    "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1632 	}
1633 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1634 	    "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1635 
1636 hxge_alloc_rx_mem_fail1:
1637 	/* Free data buffers */
1638 	i--;
1639 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1640 	    "==> hxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
1641 	for (; i >= 0; i--) {
1642 		hxge_free_rx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
1643 		    num_chunks[i]);
1644 	}
1645 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1646 	    "==> hxge_alloc_rx_mem_pool: data bufs freed (%d)", i));
1647 
1648 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1649 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1650 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1651 	KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
1652 	KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1653 	KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
1654 	KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1655 	KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
1656 	KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1657 
1658 hxge_alloc_rx_mem_pool_exit:
1659 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1660 	    "<== hxge_alloc_rx_mem_pool:status 0x%08x", status));
1661 
1662 	return (status);
1663 }
1664 
1665 static void
1666 hxge_free_rx_mem_pool(p_hxge_t hxgep)
1667 {
1668 	uint32_t		i, ndmas;
1669 	p_hxge_dma_pool_t	dma_poolp;
1670 	p_hxge_dma_common_t	*dma_buf_p;
1671 	p_hxge_dma_pool_t	dma_rbr_cntl_poolp;
1672 	p_hxge_dma_common_t	*dma_rbr_cntl_p;
1673 	p_hxge_dma_pool_t	dma_rcr_cntl_poolp;
1674 	p_hxge_dma_common_t	*dma_rcr_cntl_p;
1675 	p_hxge_dma_pool_t	dma_mbox_cntl_poolp;
1676 	p_hxge_dma_common_t	*dma_mbox_cntl_p;
1677 	uint32_t		*num_chunks;
1678 
1679 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_free_rx_mem_pool"));
1680 
1681 	dma_poolp = hxgep->rx_buf_pool_p;
1682 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
1683 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool "
1684 		    "(null rx buf pool or buf not allocated)"));
1685 		return;
1686 	}
1687 
1688 	dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p;
1689 	if (dma_rbr_cntl_poolp == NULL ||
1690 	    (!dma_rbr_cntl_poolp->buf_allocated)) {
1691 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1692 		    "<== hxge_free_rx_mem_pool "
1693 		    "(null rbr cntl buf pool or rbr cntl buf not allocated)"));
1694 		return;
1695 	}
1696 
1697 	dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p;
1698 	if (dma_rcr_cntl_poolp == NULL ||
1699 	    (!dma_rcr_cntl_poolp->buf_allocated)) {
1700 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1701 		    "<== hxge_free_rx_mem_pool "
1702 		    "(null rcr cntl buf pool or rcr cntl buf not allocated)"));
1703 		return;
1704 	}
1705 
1706 	dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p;
1707 	if (dma_mbox_cntl_poolp == NULL ||
1708 	    (!dma_mbox_cntl_poolp->buf_allocated)) {
1709 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1710 		    "<== hxge_free_rx_mem_pool "
1711 		    "(null mbox cntl buf pool or mbox cntl buf not allocated)"));
1712 		return;
1713 	}
1714 
1715 	dma_buf_p = dma_poolp->dma_buf_pool_p;
1716 	num_chunks = dma_poolp->num_chunks;
1717 
1718 	dma_rbr_cntl_p = dma_rbr_cntl_poolp->dma_buf_pool_p;
1719 	dma_rcr_cntl_p = dma_rcr_cntl_poolp->dma_buf_pool_p;
1720 	dma_mbox_cntl_p = dma_mbox_cntl_poolp->dma_buf_pool_p;
1721 	ndmas = dma_rbr_cntl_poolp->ndmas;
1722 
1723 	for (i = 0; i < ndmas; i++) {
1724 		hxge_free_rx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
1725 	}
1726 
1727 	for (i = 0; i < ndmas; i++) {
1728 		hxge_free_rx_cntl_dma(hxgep, dma_rbr_cntl_p[i]);
1729 		hxge_free_rx_cntl_dma(hxgep, dma_rcr_cntl_p[i]);
1730 		hxge_free_rx_cntl_dma(hxgep, dma_mbox_cntl_p[i]);
1731 	}
1732 
1733 	for (i = 0; i < ndmas; i++) {
1734 		KMEM_FREE(dma_buf_p[i],
1735 		    sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1736 		KMEM_FREE(dma_rbr_cntl_p[i], sizeof (hxge_dma_common_t));
1737 		KMEM_FREE(dma_rcr_cntl_p[i], sizeof (hxge_dma_common_t));
1738 		KMEM_FREE(dma_mbox_cntl_p[i], sizeof (hxge_dma_common_t));
1739 	}
1740 
1741 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1742 	KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1743 	KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
1744 	KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1745 	KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
1746 	KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1747 	KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
1748 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1749 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1750 
1751 	hxgep->rx_buf_pool_p = NULL;
1752 	hxgep->rx_rbr_cntl_pool_p = NULL;
1753 	hxgep->rx_rcr_cntl_pool_p = NULL;
1754 	hxgep->rx_mbox_cntl_pool_p = NULL;
1755 
1756 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool"));
1757 }
1758 
1759 static hxge_status_t
1760 hxge_alloc_rx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
1761     p_hxge_dma_common_t *dmap,
1762     size_t alloc_size, size_t block_size, uint32_t *num_chunks)
1763 {
1764 	p_hxge_dma_common_t	rx_dmap;
1765 	hxge_status_t		status = HXGE_OK;
1766 	size_t			total_alloc_size;
1767 	size_t			allocated = 0;
1768 	int			i, size_index, array_size;
1769 
1770 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_buf_dma"));
1771 
1772 	rx_dmap = (p_hxge_dma_common_t)
1773 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
1774 
1775 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1776 	    " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
1777 	    dma_channel, alloc_size, block_size, dmap));
1778 
1779 	total_alloc_size = alloc_size;
1780 
1781 	i = 0;
1782 	size_index = 0;
1783 	array_size = sizeof (alloc_sizes) / sizeof (size_t);
1784 	while ((size_index < array_size) &&
1785 	    (alloc_sizes[size_index] < alloc_size))
1786 		size_index++;
1787 	if (size_index >= array_size) {
1788 		size_index = array_size - 1;
1789 	}
1790 
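	/*
	 * Allocate the buffer area in chunks: start with the smallest
	 * predefined size that covers the request and fall back to smaller
	 * sizes whenever an allocation fails, up to HXGE_DMA_BLOCK chunks.
	 */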
1791 	while ((allocated < total_alloc_size) &&
1792 	    (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
1793 		rx_dmap[i].dma_chunk_index = i;
1794 		rx_dmap[i].block_size = block_size;
1795 		rx_dmap[i].alength = alloc_sizes[size_index];
1796 		rx_dmap[i].orig_alength = rx_dmap[i].alength;
1797 		rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
1798 		rx_dmap[i].dma_channel = dma_channel;
1799 		rx_dmap[i].contig_alloc_type = B_FALSE;
1800 
1801 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1802 		    "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
1803 		    "i %d nblocks %d alength %d",
1804 		    dma_channel, i, &rx_dmap[i], block_size,
1805 		    i, rx_dmap[i].nblocks, rx_dmap[i].alength));
1806 		status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1807 		    &hxge_rx_dma_attr, rx_dmap[i].alength,
1808 		    &hxge_dev_buf_dma_acc_attr,
1809 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1810 		    (p_hxge_dma_common_t)(&rx_dmap[i]));
1811 		if (status != HXGE_OK) {
1812 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1813 			    " hxge_alloc_rx_buf_dma: Alloc Failed: "
1814 			    " for size: %d", alloc_sizes[size_index]));
1815 			size_index--;
1816 		} else {
1817 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1818 			    " alloc_rx_buf_dma allocated rdc %d "
1819 			    "chunk %d size %x dvma %x bufp %llx ",
1820 			    dma_channel, i, rx_dmap[i].alength,
1821 			    rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
1822 			i++;
1823 			allocated += alloc_sizes[size_index];
1824 		}
1825 	}
1826 
1827 	if (allocated < total_alloc_size) {
1828 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1829 		    " hxge_alloc_rx_buf_dma failed due to"
1830 		    " allocated(%d) < required(%d)",
1831 		    allocated, total_alloc_size));
1832 		goto hxge_alloc_rx_mem_fail1;
1833 	}
1834 
1835 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1836 	    " alloc_rx_buf_dma rdc %d allocated %d chunks", dma_channel, i));
1837 
1838 	*num_chunks = i;
1839 	*dmap = rx_dmap;
1840 
1841 	goto hxge_alloc_rx_mem_exit;
1842 
1843 hxge_alloc_rx_mem_fail1:
1844 	KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1845 
1846 hxge_alloc_rx_mem_exit:
1847 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1848 	    "<== hxge_alloc_rx_buf_dma status 0x%08x", status));
1849 
1850 	return (status);
1851 }
1852 
1853 /*ARGSUSED*/
1854 static void
1855 hxge_free_rx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
1856     uint32_t num_chunks)
1857 {
1858 	int i;
1859 
1860 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1861 	    "==> hxge_free_rx_buf_dma: # of chunks %d", num_chunks));
1862 
1863 	for (i = 0; i < num_chunks; i++) {
1864 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1865 		    "==> hxge_free_rx_buf_dma: chunk %d dmap 0x%llx", i, dmap));
1866 		hxge_dma_mem_free(dmap++);
1867 	}
1868 
1869 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_buf_dma"));
1870 }
1871 
1872 /*ARGSUSED*/
1873 static hxge_status_t
1874 hxge_alloc_rx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
1875     p_hxge_dma_common_t *dmap, struct ddi_dma_attr *attr, size_t size)
1876 {
1877 	p_hxge_dma_common_t	rx_dmap;
1878 	hxge_status_t		status = HXGE_OK;
1879 
1880 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_cntl_dma"));
1881 
1882 	rx_dmap = (p_hxge_dma_common_t)
1883 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t), KM_SLEEP);
1884 
1885 	rx_dmap->contig_alloc_type = B_FALSE;
1886 
1887 	status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1888 	    attr, size, &hxge_dev_desc_dma_acc_attr,
1889 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, rx_dmap);
1890 	if (status != HXGE_OK) {
1891 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1892 		    " hxge_alloc_rx_cntl_dma: Alloc Failed: "
1893 		    " for size: %d", size));
1894 		goto hxge_alloc_rx_cntl_dma_fail1;
1895 	}
1896 
1897 	*dmap = rx_dmap;
1898 
1899 	goto hxge_alloc_rx_cntl_dma_exit;
1900 
1901 hxge_alloc_rx_cntl_dma_fail1:
1902 	KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t));
1903 
1904 hxge_alloc_rx_cntl_dma_exit:
1905 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1906 	    "<== hxge_alloc_rx_cntl_dma status 0x%08x", status));
1907 
1908 	return (status);
1909 }
1910 
1911 /*ARGSUSED*/
1912 static void
1913 hxge_free_rx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
1914 {
1915 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_rx_cntl_dma"));
1916 
1917 	hxge_dma_mem_free(dmap);
1918 
1919 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_rx_cntl_dma"));
1920 }
1921 
1922 static hxge_status_t
1923 hxge_alloc_tx_mem_pool(p_hxge_t hxgep)
1924 {
1925 	hxge_status_t		status = HXGE_OK;
1926 	int			i, j;
1927 	uint32_t		ndmas, st_tdc;
1928 	p_hxge_dma_pt_cfg_t	p_all_cfgp;
1929 	p_hxge_hw_pt_cfg_t	p_cfgp;
1930 	p_hxge_dma_pool_t	dma_poolp;
1931 	p_hxge_dma_common_t	*dma_buf_p;
1932 	p_hxge_dma_pool_t	dma_cntl_poolp;
1933 	p_hxge_dma_common_t	*dma_cntl_p;
1934 	size_t			tx_buf_alloc_size;
1935 	size_t			tx_cntl_alloc_size;
1936 	uint32_t		*num_chunks;	/* per dma */
1937 
1938 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool"));
1939 
1940 	p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1941 	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1942 	st_tdc = p_cfgp->start_tdc;
1943 	ndmas = p_cfgp->max_tdcs;
1944 
1945 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool: "
1946 	    "p_cfgp 0x%016llx start_tdc %d ndmas %d hxgep->max_tdcs %d",
1947 	    p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, hxgep->max_tdcs));
1948 	/*
1949 	 * Allocate memory for each transmit DMA channel.
1950 	 */
1951 	dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1952 	    KM_SLEEP);
1953 	dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1954 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1955 
1956 	dma_cntl_poolp = (p_hxge_dma_pool_t)
1957 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1958 	dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1959 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1960 
1961 	hxgep->hxge_port_tx_ring_size = hxge_tx_ring_size;
1962 
1963 	/*
1964 	 * Assume that each DMA channel will be configured with the default
1965 	 * transmit buffer size for copying transmit data. (Packets with a
1966 	 * payload over this limit will not be copied.)
1967 	 */
1968 	tx_buf_alloc_size = (hxge_bcopy_thresh * hxge_tx_ring_size);
1969 
1970 	/*
1971 	 * The addresses of the transmit descriptor ring and the mailbox must
1972 	 * all be cache-aligned (64 bytes).
1973 	 */
1974 	tx_cntl_alloc_size = hxge_tx_ring_size;
1975 	tx_cntl_alloc_size *= (sizeof (tx_desc_t));
1976 	tx_cntl_alloc_size += sizeof (txdma_mailbox_t);
1977 
1978 	num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
1979 	    KM_SLEEP);
1980 
1981 	/*
1982 	 * Allocate memory for transmit buffers and descriptor rings. Replace
1983 	 * these allocation functions with the interface functions provided by
1984 	 * the partition manager when it becomes available.
1985 	 *
1986 	 * Allocate memory for the transmit buffer pool.
1987 	 */
1988 	for (i = 0; i < ndmas; i++) {
1989 		num_chunks[i] = 0;
1990 		status = hxge_alloc_tx_buf_dma(hxgep, st_tdc, &dma_buf_p[i],
1991 		    tx_buf_alloc_size, hxge_bcopy_thresh, &num_chunks[i]);
1992 		if (status != HXGE_OK) {
1993 			break;
1994 		}
1995 		st_tdc++;
1996 	}
1997 
1998 	if (i < ndmas) {
1999 		goto hxge_alloc_tx_mem_pool_fail1;
2000 	}
2001 
2002 	st_tdc = p_cfgp->start_tdc;
2003 
2004 	/*
2005 	 * Allocate memory for descriptor rings and mailbox.
2006 	 */
2007 	for (j = 0; j < ndmas; j++) {
2008 		status = hxge_alloc_tx_cntl_dma(hxgep, st_tdc, &dma_cntl_p[j],
2009 		    tx_cntl_alloc_size);
2010 		if (status != HXGE_OK) {
2011 			break;
2012 		}
2013 		st_tdc++;
2014 	}
2015 
2016 	if (j < ndmas) {
2017 		goto hxge_alloc_tx_mem_pool_fail2;
2018 	}
2019 
2020 	dma_poolp->ndmas = ndmas;
2021 	dma_poolp->num_chunks = num_chunks;
2022 	dma_poolp->buf_allocated = B_TRUE;
2023 	dma_poolp->dma_buf_pool_p = dma_buf_p;
2024 	hxgep->tx_buf_pool_p = dma_poolp;
2025 
2026 	dma_cntl_poolp->ndmas = ndmas;
2027 	dma_cntl_poolp->buf_allocated = B_TRUE;
2028 	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2029 	hxgep->tx_cntl_pool_p = dma_cntl_poolp;
2030 
2031 	HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2032 	    "==> hxge_alloc_tx_mem_pool: start_tdc %d "
2033 	    "ndmas %d poolp->ndmas %d", st_tdc, ndmas, dma_poolp->ndmas));
2034 
2035 	goto hxge_alloc_tx_mem_pool_exit;
2036 
2037 hxge_alloc_tx_mem_pool_fail2:
2038 	/* Free control buffers */
2039 	j--;
2040 	for (; j >= 0; j--) {
2041 		hxge_free_tx_cntl_dma(hxgep,
2042 		    (p_hxge_dma_common_t)dma_cntl_p[j]);
2043 	}
2044 
2045 hxge_alloc_tx_mem_pool_fail1:
2046 	/* Free data buffers */
2047 	i--;
2048 	for (; i >= 0; i--) {
2049 		hxge_free_tx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
2050 		    num_chunks[i]);
2051 	}
2052 
2053 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2054 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2055 	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2056 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2057 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2058 
2059 hxge_alloc_tx_mem_pool_exit:
2060 	HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2061 	    "<== hxge_alloc_tx_mem_pool:status 0x%08x", status));
2062 
2063 	return (status);
2064 }
2065 
2066 static hxge_status_t
2067 hxge_alloc_tx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
2068     p_hxge_dma_common_t *dmap, size_t alloc_size,
2069     size_t block_size, uint32_t *num_chunks)
2070 {
2071 	p_hxge_dma_common_t	tx_dmap;
2072 	hxge_status_t		status = HXGE_OK;
2073 	size_t			total_alloc_size;
2074 	size_t			allocated = 0;
2075 	int			i, size_index, array_size;
2076 
2077 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_buf_dma"));
2078 
2079 	tx_dmap = (p_hxge_dma_common_t)
2080 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
2081 
2082 	total_alloc_size = alloc_size;
2083 	i = 0;
2084 	size_index = 0;
2085 	array_size = sizeof (alloc_sizes) / sizeof (size_t);
2086 	while ((size_index < array_size) &&
2087 	    (alloc_sizes[size_index] < alloc_size))
2088 		size_index++;
2089 	if (size_index >= array_size) {
2090 		size_index = array_size - 1;
2091 	}
2092 
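	/*
	 * Same chunked strategy as the receive side: retry with smaller
	 * predefined sizes until the full amount is allocated or the
	 * HXGE_DMA_BLOCK chunk limit is reached.
	 */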
2093 	while ((allocated < total_alloc_size) &&
2094 	    (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
2095 		tx_dmap[i].dma_chunk_index = i;
2096 		tx_dmap[i].block_size = block_size;
2097 		tx_dmap[i].alength = alloc_sizes[size_index];
2098 		tx_dmap[i].orig_alength = tx_dmap[i].alength;
2099 		tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2100 		tx_dmap[i].dma_channel = dma_channel;
2101 		tx_dmap[i].contig_alloc_type = B_FALSE;
2102 
2103 		status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2104 		    &hxge_tx_dma_attr, tx_dmap[i].alength,
2105 		    &hxge_dev_buf_dma_acc_attr,
2106 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
2107 		    (p_hxge_dma_common_t)(&tx_dmap[i]));
2108 		if (status != HXGE_OK) {
2109 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2110 			    " hxge_alloc_tx_buf_dma: Alloc Failed: "
2111 			    " for size: %d", alloc_sizes[size_index]));
2112 			size_index--;
2113 		} else {
2114 			i++;
2115 			allocated += alloc_sizes[size_index];
2116 		}
2117 	}
2118 
2119 	if (allocated < total_alloc_size) {
2120 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2121 		    " hxge_alloc_tx_buf_dma: failed due to"
2122 		    " allocated(%d) < required(%d)",
2123 		    allocated, total_alloc_size));
2124 		goto hxge_alloc_tx_mem_fail1;
2125 	}
2126 
2127 	*num_chunks = i;
2128 	*dmap = tx_dmap;
2129 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2130 	    "==> hxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
2131 	    *dmap, i));
2132 	goto hxge_alloc_tx_mem_exit;
2133 
2134 hxge_alloc_tx_mem_fail1:
2135 	KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2136 
2137 hxge_alloc_tx_mem_exit:
2138 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2139 	    "<== hxge_alloc_tx_buf_dma status 0x%08x", status));
2140 
2141 	return (status);
2142 }
2143 
2144 /*ARGSUSED*/
2145 static void
2146 hxge_free_tx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
2147     uint32_t num_chunks)
2148 {
2149 	int i;
2150 
2151 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_tx_buf_dma"));
2152 
2153 	for (i = 0; i < num_chunks; i++) {
2154 		hxge_dma_mem_free(dmap++);
2155 	}
2156 
2157 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_tx_buf_dma"));
2158 }
2159 
2160 /*ARGSUSED*/
2161 static hxge_status_t
2162 hxge_alloc_tx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
2163     p_hxge_dma_common_t *dmap, size_t size)
2164 {
2165 	p_hxge_dma_common_t	tx_dmap;
2166 	hxge_status_t		status = HXGE_OK;
2167 
2168 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_cntl_dma"));
2169 
2170 	tx_dmap = (p_hxge_dma_common_t)KMEM_ZALLOC(sizeof (hxge_dma_common_t),
2171 	    KM_SLEEP);
2172 
2173 	tx_dmap->contig_alloc_type = B_FALSE;
2174 
2175 	status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2176 	    &hxge_tx_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr,
2177 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, tx_dmap);
2178 	if (status != HXGE_OK) {
2179 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2180 		    " hxge_alloc_tx_cntl_dma: Alloc Failed: "
2181 		    " for size: %d", size));
2182 		goto hxge_alloc_tx_cntl_dma_fail1;
2183 	}
2184 
2185 	*dmap = tx_dmap;
2186 
2187 	goto hxge_alloc_tx_cntl_dma_exit;
2188 
2189 hxge_alloc_tx_cntl_dma_fail1:
2190 	KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t));
2191 
2192 hxge_alloc_tx_cntl_dma_exit:
2193 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2194 	    "<== hxge_alloc_tx_cntl_dma status 0x%08x", status));
2195 
2196 	return (status);
2197 }
2198 
2199 /*ARGSUSED*/
2200 static void
2201 hxge_free_tx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
2202 {
2203 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_tx_cntl_dma"));
2204 
2205 	hxge_dma_mem_free(dmap);
2206 
2207 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_tx_cntl_dma"));
2208 }
2209 
2210 static void
2211 hxge_free_tx_mem_pool(p_hxge_t hxgep)
2212 {
2213 	uint32_t		i, ndmas;
2214 	p_hxge_dma_pool_t	dma_poolp;
2215 	p_hxge_dma_common_t	*dma_buf_p;
2216 	p_hxge_dma_pool_t	dma_cntl_poolp;
2217 	p_hxge_dma_common_t	*dma_cntl_p;
2218 	uint32_t		*num_chunks;
2219 
2220 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_free_tx_mem_pool"));
2221 
2222 	dma_poolp = hxgep->tx_buf_pool_p;
2223 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
2224 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2225 		    "<== hxge_free_tx_mem_pool "
2226 		    "(null tx buf pool or buf not allocated)"));
2227 		return;
2228 	}
2229 
2230 	dma_cntl_poolp = hxgep->tx_cntl_pool_p;
2231 	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
2232 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2233 		    "<== hxge_free_tx_mem_pool "
2234 		    "(null tx cntl buf pool or cntl buf not allocated)"));
2235 		return;
2236 	}
2237 
2238 	dma_buf_p = dma_poolp->dma_buf_pool_p;
2239 	num_chunks = dma_poolp->num_chunks;
2240 
2241 	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
2242 	ndmas = dma_cntl_poolp->ndmas;
2243 
2244 	for (i = 0; i < ndmas; i++) {
2245 		hxge_free_tx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
2246 	}
2247 
2248 	for (i = 0; i < ndmas; i++) {
2249 		hxge_free_tx_cntl_dma(hxgep, dma_cntl_p[i]);
2250 	}
2251 
2252 	for (i = 0; i < ndmas; i++) {
2253 		KMEM_FREE(dma_buf_p[i],
2254 		    sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2255 		KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t));
2256 	}
2257 
2258 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2259 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2260 	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2261 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2262 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2263 
2264 	hxgep->tx_buf_pool_p = NULL;
2265 	hxgep->tx_cntl_pool_p = NULL;
2266 
2267 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_free_tx_mem_pool"));
2268 }
2269 
2270 /*ARGSUSED*/
2271 static hxge_status_t
2272 hxge_dma_mem_alloc(p_hxge_t hxgep, dma_method_t method,
2273     struct ddi_dma_attr *dma_attrp,
2274     size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
2275     p_hxge_dma_common_t dma_p)
2276 {
2277 	caddr_t		kaddrp;
2278 	int		ddi_status = DDI_SUCCESS;
2279 
2280 	dma_p->dma_handle = NULL;
2281 	dma_p->acc_handle = NULL;
2282 	dma_p->kaddrp = NULL;
2283 
2284 	ddi_status = ddi_dma_alloc_handle(hxgep->dip, dma_attrp,
2285 	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
2286 	if (ddi_status != DDI_SUCCESS) {
2287 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2288 		    "hxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
2289 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2290 	}
2291 
2292 	ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, acc_attr_p,
2293 	    xfer_flags, DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
2294 	    &dma_p->acc_handle);
2295 	if (ddi_status != DDI_SUCCESS) {
2296 		/* The caller will decide whether it is fatal */
2297 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2298 		    "hxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
2299 		ddi_dma_free_handle(&dma_p->dma_handle);
2300 		dma_p->dma_handle = NULL;
2301 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2302 	}
2303 
2304 	if (dma_p->alength < length) {
2305 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2306 		    "hxge_dma_mem_alloc:ddi_dma_mem_alloc < length."));
2307 		ddi_dma_mem_free(&dma_p->acc_handle);
2308 		ddi_dma_free_handle(&dma_p->dma_handle);
2309 		dma_p->acc_handle = NULL;
2310 		dma_p->dma_handle = NULL;
2311 		return (HXGE_ERROR);
2312 	}
2313 
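	/*
	 * Bind the allocated memory; the driver requires the binding to
	 * produce exactly one DMA cookie (checked below).
	 */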
2314 	ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2315 	    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2316 	    &dma_p->dma_cookie, &dma_p->ncookies);
2317 	if (ddi_status != DDI_DMA_MAPPED) {
2318 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2319 		    "hxge_dma_mem_alloc:ddi_dma_addr_bind failed "
2320 		    "(status 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2321 		if (dma_p->acc_handle) {
2322 			ddi_dma_mem_free(&dma_p->acc_handle);
2323 			dma_p->acc_handle = NULL;
2324 		}
2325 		ddi_dma_free_handle(&dma_p->dma_handle);
2326 		dma_p->dma_handle = NULL;
2327 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2328 	}
2329 
2330 	if (dma_p->ncookies != 1) {
2331 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2332 		    "hxge_dma_mem_alloc:ddi_dma_addr_bind > 1 cookie "
2333 		    "(status 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2334 		if (dma_p->acc_handle) {
2335 			ddi_dma_mem_free(&dma_p->acc_handle);
2336 			dma_p->acc_handle = NULL;
2337 		}
2338 		(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2339 		ddi_dma_free_handle(&dma_p->dma_handle);
2340 		dma_p->dma_handle = NULL;
2341 		return (HXGE_ERROR);
2342 	}
2343 
2344 	dma_p->kaddrp = kaddrp;
2345 #if defined(__i386)
2346 	dma_p->ioaddr_pp =
2347 	    (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
2348 #else
2349 	dma_p->ioaddr_pp = (unsigned char *) dma_p->dma_cookie.dmac_laddress;
2350 #endif
2351 
2352 	HPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
2353 
2354 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_dma_mem_alloc: "
2355 	    "dma buffer allocated: dma_p $%p "
2356 	    "return dmac_laddress from cookie $%p dmac_size %d "
2357 	    "dma_p->ioaddr_p $%p "
2358 	    "dma_p->orig_ioaddr_p $%p "
2359 	    "orig_vatopa $%p "
2360 	    "alength %d (0x%x) "
2361 	    "kaddrp $%p "
2362 	    "length %d (0x%x)",
2363 	    dma_p,
2364 	    dma_p->dma_cookie.dmac_laddress,
2365 	    dma_p->dma_cookie.dmac_size,
2366 	    dma_p->ioaddr_pp,
2367 	    dma_p->orig_ioaddr_pp,
2368 	    dma_p->orig_vatopa,
2369 	    dma_p->alength, dma_p->alength,
2370 	    kaddrp,
2371 	    length, length));
2372 
2373 	return (HXGE_OK);
2374 }
2375 
2376 static void
2377 hxge_dma_mem_free(p_hxge_dma_common_t dma_p)
2378 {
2379 	if (dma_p == NULL)
2380 		return;
2381 
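	/* Unbind the handle, then release the DMA and access resources. */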
2382 	if (dma_p->dma_handle != NULL) {
2383 		if (dma_p->ncookies) {
2384 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2385 			dma_p->ncookies = 0;
2386 		}
2387 		ddi_dma_free_handle(&dma_p->dma_handle);
2388 		dma_p->dma_handle = NULL;
2389 	}
2390 
2391 	if (dma_p->acc_handle != NULL) {
2392 		ddi_dma_mem_free(&dma_p->acc_handle);
2393 		dma_p->acc_handle = NULL;
2394 		HPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
2395 	}
2396 
2397 	dma_p->kaddrp = NULL;
2398 	dma_p->alength = 0;
2399 }
2400 
2401 /*
2402  *	hxge_m_start() -- start transmitting and receiving.
2403  *
2404  *	This function is called by the MAC layer when the first
2405  *	stream is opened to prepare the hardware for sending
2406  *	and receiving packets.
2407  */
2408 static int
2409 hxge_m_start(void *arg)
2410 {
2411 	p_hxge_t hxgep = (p_hxge_t)arg;
2412 
2413 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_start"));
2414 
2415 	MUTEX_ENTER(hxgep->genlock);
2416 
2417 	if (hxge_init(hxgep) != DDI_SUCCESS) {
2418 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2419 		    "<== hxge_m_start: initialization failed"));
2420 		MUTEX_EXIT(hxgep->genlock);
2421 		return (EIO);
2422 	}
2423 
2424 	if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) {
2425 		/*
2426 		 * Start a timer to check for system errors and TX hangs.
2427 		 */
2428 		hxgep->hxge_timerid = hxge_start_timer(hxgep,
2429 		    hxge_check_hw_state, HXGE_CHECK_TIMER);
2430 
2431 		hxgep->hxge_mac_state = HXGE_MAC_STARTED;
2432 
2433 		hxgep->timeout.link_status = 0;
2434 		hxgep->timeout.report_link_status = B_TRUE;
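		/* Poll the link state every two seconds. */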
2435 		hxgep->timeout.ticks = drv_usectohz(2 * 1000000);
2436 
2437 		/* Start the link status timer to check the link status */
2438 		MUTEX_ENTER(&hxgep->timeout.lock);
2439 		hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
2440 		    hxgep->timeout.ticks);
2441 		MUTEX_EXIT(&hxgep->timeout.lock);
2442 	}
2443 
2444 	MUTEX_EXIT(hxgep->genlock);
2445 
2446 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_start"));
2447 
2448 	return (0);
2449 }
2450 
2451 /*
2452  * hxge_m_stop(): stop transmitting and receiving.
2453  */
2454 static void
2455 hxge_m_stop(void *arg)
2456 {
2457 	p_hxge_t hxgep = (p_hxge_t)arg;
2458 
2459 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_stop"));
2460 
2461 	if (hxgep->hxge_timerid) {
2462 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
2463 		hxgep->hxge_timerid = 0;
2464 	}
2465 
2466 	/* Stop the link status timer before unregistering */
2467 	MUTEX_ENTER(&hxgep->timeout.lock);
2468 	if (hxgep->timeout.id) {
2469 		(void) untimeout(hxgep->timeout.id);
2470 		hxgep->timeout.id = 0;
2471 	}
2472 	hxge_link_update(hxgep, LINK_STATE_DOWN);
2473 	MUTEX_EXIT(&hxgep->timeout.lock);
2474 
2475 	MUTEX_ENTER(hxgep->genlock);
2476 
2477 	hxge_uninit(hxgep);
2478 
2479 	hxgep->hxge_mac_state = HXGE_MAC_STOPPED;
2480 
2481 	MUTEX_EXIT(hxgep->genlock);
2482 
2483 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_stop"));
2484 }
2485 
2486 static int
2487 hxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
2488 {
2489 	p_hxge_t		hxgep = (p_hxge_t)arg;
2490 	struct ether_addr	addrp;
2491 
2492 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_multicst: add %d", add));
2493 
2494 	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
2495 
2496 	if (add) {
2497 		if (hxge_add_mcast_addr(hxgep, &addrp)) {
2498 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2499 			    "<== hxge_m_multicst: add multicast failed"));
2500 			return (EINVAL);
2501 		}
2502 	} else {
2503 		if (hxge_del_mcast_addr(hxgep, &addrp)) {
2504 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2505 			    "<== hxge_m_multicst: del multicast failed"));
2506 			return (EINVAL);
2507 		}
2508 	}
2509 
2510 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_multicst"));
2511 
2512 	return (0);
2513 }
2514 
2515 static int
2516 hxge_m_promisc(void *arg, boolean_t on)
2517 {
2518 	p_hxge_t hxgep = (p_hxge_t)arg;
2519 
2520 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_promisc: on %d", on));
2521 
2522 	if (hxge_set_promisc(hxgep, on)) {
2523 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2524 		    "<== hxge_m_promisc: set promisc failed"));
2525 		return (EINVAL);
2526 	}
2527 
2528 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_promisc: on %d", on));
2529 
2530 	return (0);
2531 }
2532 
2533 static void
2534 hxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2535 {
2536 	p_hxge_t	hxgep = (p_hxge_t)arg;
2537 	struct iocblk	*iocp = (struct iocblk *)mp->b_rptr;
2538 	boolean_t	need_privilege;
2539 	int		err;
2540 	int		cmd;
2541 
2542 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl"));
2543 
2544 	iocp = (struct iocblk *)mp->b_rptr;
2545 	iocp->ioc_error = 0;
2546 	need_privilege = B_TRUE;
2547 	cmd = iocp->ioc_cmd;
2548 
2549 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl: cmd 0x%08x", cmd));
2550 	switch (cmd) {
2551 	default:
2552 		miocnak(wq, mp, 0, EINVAL);
2553 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl: invalid"));
2554 		return;
2555 
2556 	case LB_GET_INFO_SIZE:
2557 	case LB_GET_INFO:
2558 	case LB_GET_MODE:
2559 		need_privilege = B_FALSE;
2560 		break;
2561 
2562 	case LB_SET_MODE:
2563 		break;
2564 
2565 	case ND_GET:
2566 		need_privilege = B_FALSE;
2567 		break;
2568 	case ND_SET:
2569 		break;
2570 
2571 	case HXGE_GET_TX_RING_SZ:
2572 	case HXGE_GET_TX_DESC:
2573 	case HXGE_TX_SIDE_RESET:
2574 	case HXGE_RX_SIDE_RESET:
2575 	case HXGE_GLOBAL_RESET:
2576 	case HXGE_RESET_MAC:
2577 	case HXGE_PUT_TCAM:
2578 	case HXGE_GET_TCAM:
2579 	case HXGE_RTRACE:
2580 
2581 		need_privilege = B_FALSE;
2582 		break;
2583 	}
2584 
2585 	if (need_privilege) {
2586 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
2587 		if (err != 0) {
2588 			miocnak(wq, mp, 0, err);
2589 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2590 			    "<== hxge_m_ioctl: no priv"));
2591 			return;
2592 		}
2593 	}
2594 
2595 	switch (cmd) {
2596 	case ND_GET:
2597 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_GET command"));
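		/* FALLTHROUGH */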
2598 	case ND_SET:
2599 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_SET command"));
2600 		hxge_param_ioctl(hxgep, wq, mp, iocp);
2601 		break;
2602 
2603 	case LB_GET_MODE:
2604 	case LB_SET_MODE:
2605 	case LB_GET_INFO_SIZE:
2606 	case LB_GET_INFO:
2607 		hxge_loopback_ioctl(hxgep, wq, mp, iocp);
2608 		break;
2609 
2610 	case HXGE_PUT_TCAM:
2611 	case HXGE_GET_TCAM:
2612 	case HXGE_GET_TX_RING_SZ:
2613 	case HXGE_GET_TX_DESC:
2614 	case HXGE_TX_SIDE_RESET:
2615 	case HXGE_RX_SIDE_RESET:
2616 	case HXGE_GLOBAL_RESET:
2617 	case HXGE_RESET_MAC:
2618 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL,
2619 		    "==> hxge_m_ioctl: cmd 0x%x", cmd));
2620 		hxge_hw_ioctl(hxgep, wq, mp, iocp);
2621 		break;
2622 	}
2623 
2624 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl"));
2625 }
2626 
2627 /*ARGSUSED*/
2628 static int
2629 hxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
2630 {
2631 	p_hxge_ring_handle_t	rhp = (p_hxge_ring_handle_t)rdriver;
2632 	p_hxge_t		hxgep;
2633 	p_tx_ring_t		ring;
2634 
2635 	ASSERT(rhp != NULL);
2636 	ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_TDCS));
2637 
2638 	hxgep = rhp->hxgep;
2639 
2640 	/*
2641 	 * Get the ring pointer.
2642 	 */
2643 	ring = hxgep->tx_rings->rings[rhp->index];
2644 
2645 	/*
2646 	 * Fill in the handle for the transmit.
2647 	 */
2648 	MUTEX_ENTER(&ring->lock);
2649 	ring->ring_handle = rhp->ring_handle;
2650 	MUTEX_EXIT(&ring->lock);
2651 
2652 	return (0);
2653 }
2654 
2655 static void
2656 hxge_tx_ring_stop(mac_ring_driver_t rdriver)
2657 {
2658 	p_hxge_ring_handle_t    rhp = (p_hxge_ring_handle_t)rdriver;
2659 	p_hxge_t		hxgep;
2660 	p_tx_ring_t		ring;
2661 
2662 	ASSERT(rhp != NULL);
2663 	ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_TDCS));
2664 
2665 	hxgep = rhp->hxgep;
2666 	ring = hxgep->tx_rings->rings[rhp->index];
2667 
2668 	MUTEX_ENTER(&ring->lock);
2669 	ring->ring_handle = (mac_ring_handle_t)NULL;
2670 	MUTEX_EXIT(&ring->lock);
2671 }
2672 
2673 static int
2674 hxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
2675 {
2676 	p_hxge_ring_handle_t	rhp = (p_hxge_ring_handle_t)rdriver;
2677 	p_hxge_t		hxgep;
2678 	p_rx_rcr_ring_t		ring;
2679 	int			i;
2680 
2681 	ASSERT(rhp != NULL);
2682 	ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_RDCS));
2683 
2684 	hxgep = rhp->hxgep;
2685 
2686 	/*
2687 	 * Get pointer to ring.
2688 	 */
2689 	ring = hxgep->rx_rcr_rings->rcr_rings[rhp->index];
2690 
2691 	MUTEX_ENTER(&ring->lock);
2692 
2693 	if (rhp->started) {
2694 		MUTEX_EXIT(&ring->lock);
2695 		return (0);
2696 	}
2697 
2698 	/*
2699 	 * Set the ldvp and ldgp pointers to enable/disable
2700 	 * polling.
2701 	 */
2702 	for (i = 0; i < hxgep->ldgvp->maxldvs; i++) {
2703 		if ((hxgep->ldgvp->ldvp[i].is_rxdma == 1) &&
2704 		    (hxgep->ldgvp->ldvp[i].channel == rhp->index)) {
2705 			ring->ldvp = &hxgep->ldgvp->ldvp[i];
2706 			ring->ldgp = hxgep->ldgvp->ldvp[i].ldgp;
2707 			break;
2708 		}
2709 	}
2710 
2711 	rhp->started = B_TRUE;
2712 	ring->rcr_mac_handle = rhp->ring_handle;
2713 	ring->rcr_gen_num = mr_gen_num;
2714 	MUTEX_EXIT(&ring->lock);
2715 
2716 	return (0);
2717 }
2718 
2719 static void
2720 hxge_rx_ring_stop(mac_ring_driver_t rdriver)
2721 {
2722 	p_hxge_ring_handle_t	rhp = (p_hxge_ring_handle_t)rdriver;
2723 	p_hxge_t		hxgep;
2724 	p_rx_rcr_ring_t		ring;
2725 
2726 	ASSERT(rhp != NULL);
2727 	ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_RDCS));
2728 
2729 	hxgep = rhp->hxgep;
2730 	ring =  hxgep->rx_rcr_rings->rcr_rings[rhp->index];
2731 
2732 	MUTEX_ENTER(&ring->lock);
2733 	rhp->started = B_FALSE;
2734 	ring->rcr_mac_handle = NULL;
2735 	ring->ldvp = NULL;
2736 	ring->ldgp = NULL;
2737 	MUTEX_EXIT(&ring->lock);
2738 }
2739 
2740 static int
2741 hxge_rx_group_start(mac_group_driver_t gdriver)
2742 {
2743 	hxge_ring_group_t	*group = (hxge_ring_group_t *)gdriver;
2744 
2745 	ASSERT(group->hxgep != NULL);
2746 	ASSERT(group->hxgep->hxge_mac_state == HXGE_MAC_STARTED);
2747 
2748 	MUTEX_ENTER(group->hxgep->genlock);
2749 	group->started = B_TRUE;
2750 	MUTEX_EXIT(group->hxgep->genlock);
2751 
2752 	return (0);
2753 }
2754 
2755 static void
2756 hxge_rx_group_stop(mac_group_driver_t gdriver)
2757 {
2758 	hxge_ring_group_t	*group = (hxge_ring_group_t *)gdriver;
2759 
2760 	ASSERT(group->hxgep != NULL);
2761 	ASSERT(group->hxgep->hxge_mac_state == HXGE_MAC_STARTED);
2762 	ASSERT(group->started == B_TRUE);
2763 
2764 	MUTEX_ENTER(group->hxgep->genlock);
2765 	group->started = B_FALSE;
2766 	MUTEX_EXIT(group->hxgep->genlock);
2767 }
2768 
2769 static int
2770 hxge_mmac_get_slot(p_hxge_t hxgep, int *slot)
2771 {
2772 	int	i;
2773 
2774 	/*
2775 	 * Find an open slot.
2776 	 */
2777 	for (i = 0; i < hxgep->mmac.total; i++) {
2778 		if (!hxgep->mmac.addrs[i].set) {
2779 			*slot = i;
2780 			return (0);
2781 		}
2782 	}
2783 
2784 	return (ENXIO);
2785 }
2786 
2787 static int
2788 hxge_mmac_set_addr(p_hxge_t hxgep, int slot, const uint8_t *addr)
2789 {
2790 	struct ether_addr	eaddr;
2791 	hxge_status_t		status = HXGE_OK;
2792 
2793 	bcopy(addr, (uint8_t *)&eaddr, ETHERADDRL);
2794 
2795 	/*
2796 	 * Set new interface local address and re-init device.
2797 	 * This is destructive to any other streams attached
2798 	 * to this device.
2799 	 */
2800 	RW_ENTER_WRITER(&hxgep->filter_lock);
2801 	status = hxge_pfc_set_mac_address(hxgep, slot, &eaddr);
2802 	RW_EXIT(&hxgep->filter_lock);
2803 	if (status != HXGE_OK)
2804 		return (status);
2805 
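	/* Record the address in the software shadow and update the count. */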
2806 	hxgep->mmac.addrs[slot].set = B_TRUE;
2807 	bcopy(addr, hxgep->mmac.addrs[slot].addr, ETHERADDRL);
2808 	hxgep->mmac.available--;
2809 	if (slot == HXGE_MAC_DEFAULT_ADDR_SLOT)
2810 		hxgep->mmac.addrs[slot].primary = B_TRUE;
2811 
2812 	return (0);
2813 }
2814 
2815 static int
2816 hxge_mmac_find_addr(p_hxge_t hxgep, const uint8_t *addr, int *slot)
2817 {
2818 	int	i, result;
2819 
2820 	for (i = 0; i < hxgep->mmac.total; i++) {
2821 		if (hxgep->mmac.addrs[i].set) {
2822 			result = memcmp(hxgep->mmac.addrs[i].addr,
2823 			    addr, ETHERADDRL);
2824 			if (result == 0) {
2825 				*slot = i;
2826 				return (0);
2827 			}
2828 		}
2829 	}
2830 
2831 	return (EINVAL);
2832 }
2833 
2834 static int
2835 hxge_mmac_unset_addr(p_hxge_t hxgep, int slot)
2836 {
2837 	hxge_status_t	status;
2838 	int		i;
2839 
2840 	status = hxge_pfc_clear_mac_address(hxgep, slot);
2841 	if (status != HXGE_OK)
2842 		return (status);
2843 
2844 	for (i = 0; i < ETHERADDRL; i++)
2845 		hxgep->mmac.addrs[slot].addr[i] = 0;
2846 
2847 	hxgep->mmac.addrs[slot].set = B_FALSE;
2848 	if (slot == HXGE_MAC_DEFAULT_ADDR_SLOT)
2849 		hxgep->mmac.addrs[slot].primary = B_FALSE;
2850 	hxgep->mmac.available++;
2851 
2852 	return (0);
2853 }
2854 
2855 static int
2856 hxge_rx_group_add_mac(void *arg, const uint8_t *mac_addr)
2857 {
2858 	hxge_ring_group_t	*group = arg;
2859 	p_hxge_t		hxgep = group->hxgep;
2860 	int			slot = 0;
2861 
2862 	ASSERT(group->type == MAC_RING_TYPE_RX);
2863 
2864 	MUTEX_ENTER(hxgep->genlock);
2865 
2866 	/*
2867 	 * Find a slot for the address.
2868 	 */
2869 	if (hxge_mmac_get_slot(hxgep, &slot) != 0) {
2870 		MUTEX_EXIT(hxgep->genlock);
2871 		return (ENOSPC);
2872 	}
2873 
2874 	/*
2875 	 * Program the MAC address.
2876 	 */
2877 	if (hxge_mmac_set_addr(hxgep, slot, mac_addr) != 0) {
2878 		MUTEX_EXIT(hxgep->genlock);
2879 		return (ENOSPC);
2880 	}
2881 
2882 	MUTEX_EXIT(hxgep->genlock);
2883 	return (0);
2884 }
2885 
2886 static int
2887 hxge_rx_group_rem_mac(void *arg, const uint8_t *mac_addr)
2888 {
2889 	hxge_ring_group_t	*group = arg;
2890 	p_hxge_t		hxgep = group->hxgep;
2891 	int			rv, slot;
2892 
2893 	ASSERT(group->type == MAC_RING_TYPE_RX);
2894 
2895 	MUTEX_ENTER(hxgep->genlock);
2896 
2897 	if ((rv = hxge_mmac_find_addr(hxgep, mac_addr, &slot)) != 0) {
2898 		MUTEX_EXIT(hxgep->genlock);
2899 		return (rv);
2900 	}
2901 
2902 	if ((rv = hxge_mmac_unset_addr(hxgep, slot)) != 0) {
2903 		MUTEX_EXIT(hxgep->genlock);
2904 		return (rv);
2905 	}
2906 
2907 	MUTEX_EXIT(hxgep->genlock);
2908 	return (0);
2909 }
2910 
2911 static void
2912 hxge_group_get(void *arg, mac_ring_type_t type, int groupid,
2913     mac_group_info_t *infop, mac_group_handle_t gh)
2914 {
2915 	p_hxge_t		hxgep = arg;
2916 	hxge_ring_group_t	*group;
2917 
2918 	ASSERT(type == MAC_RING_TYPE_RX);
2919 
2920 	switch (type) {
2921 	case MAC_RING_TYPE_RX:
2922 		group = &hxgep->rx_groups[groupid];
2923 		group->hxgep = hxgep;
2924 		group->ghandle = gh;
2925 		group->index = groupid;
2926 		group->type = type;
2927 
2928 		infop->mgi_driver = (mac_group_driver_t)group;
2929 		infop->mgi_start = hxge_rx_group_start;
2930 		infop->mgi_stop = hxge_rx_group_stop;
2931 		infop->mgi_addmac = hxge_rx_group_add_mac;
2932 		infop->mgi_remmac = hxge_rx_group_rem_mac;
2933 		infop->mgi_count = HXGE_MAX_RDCS;
2934 		break;
2935 
2936 	case MAC_RING_TYPE_TX:
2937 	default:
2938 		break;
2939 	}
2940 }
2941 
2942 /*
2943  * Callback function for the GLDv3 layer to register all rings.
2944  */
2945 /*ARGSUSED*/
2946 static void
2947 hxge_fill_ring(void *arg, mac_ring_type_t type, const int rg_index,
2948     const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
2949 {
2950 	p_hxge_t	hxgep = arg;
2951 
2952 	switch (type) {
2953 	case MAC_RING_TYPE_TX: {
2954 		p_hxge_ring_handle_t	rhp;
2955 
2956 		ASSERT((index >= 0) && (index < HXGE_MAX_TDCS));
2957 		rhp = &hxgep->tx_ring_handles[index];
2958 		rhp->hxgep = hxgep;
2959 		rhp->index = index;
2960 		rhp->ring_handle = rh;
2961 		infop->mri_driver = (mac_ring_driver_t)rhp;
2962 		infop->mri_start = hxge_tx_ring_start;
2963 		infop->mri_stop = hxge_tx_ring_stop;
2964 		infop->mri_tx = hxge_tx_ring_send;
2965 		break;
2966 	}
2967 	case MAC_RING_TYPE_RX: {
2968 		p_hxge_ring_handle_t    rhp;
2969 		mac_intr_t		hxge_mac_intr;
2970 
2971 		ASSERT((index >= 0) && (index < HXGE_MAX_RDCS));
2972 		rhp = &hxgep->rx_ring_handles[index];
2973 		rhp->hxgep = hxgep;
2974 		rhp->index = index;
2975 		rhp->ring_handle = rh;
2976 
2977 		/*
2978 		 * Entrypoint to enable interrupt (disable poll) and
2979 		 * disable interrupt (enable poll).
2980 		 */
2981 		hxge_mac_intr.mi_handle = (mac_intr_handle_t)rhp;
2982 		hxge_mac_intr.mi_enable =
2983 		    (mac_intr_enable_t)hxge_disable_poll;
2984 		hxge_mac_intr.mi_disable =
2985 		    (mac_intr_disable_t)hxge_enable_poll;
2986 		infop->mri_driver = (mac_ring_driver_t)rhp;
2987 		infop->mri_start = hxge_rx_ring_start;
2988 		infop->mri_stop = hxge_rx_ring_stop;
2989 		infop->mri_intr = hxge_mac_intr;
2990 		infop->mri_poll = hxge_rx_poll;
2991 		break;
2992 	}
2993 	default:
2994 		break;
2995 	}
2996 }
2997 
2998 /*ARGSUSED*/
2999 boolean_t
3000 hxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
3001 {
3002 	p_hxge_t	hxgep = arg;
3003 
3004 	switch (cap) {
3005 	case MAC_CAPAB_HCKSUM: {
3006 		uint32_t	*txflags = cap_data;
3007 
3008 		*txflags = HCKSUM_INET_PARTIAL;
3009 		break;
3010 	}
3011 
3012 	case MAC_CAPAB_RINGS: {
3013 		mac_capab_rings_t	*cap_rings = cap_data;
3014 
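		/*
		 * Report static ring groups: RX rings are grouped and support
		 * per-group MAC address filtering; TX rings are exposed
		 * without groups.
		 */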
3015 		MUTEX_ENTER(hxgep->genlock);
3016 		if (cap_rings->mr_type == MAC_RING_TYPE_RX) {
3017 			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
3018 			cap_rings->mr_rnum = HXGE_MAX_RDCS;
3019 			cap_rings->mr_rget = hxge_fill_ring;
3020 			cap_rings->mr_gnum = HXGE_MAX_RX_GROUPS;
3021 			cap_rings->mr_gget = hxge_group_get;
3022 			cap_rings->mr_gaddring = NULL;
3023 			cap_rings->mr_gremring = NULL;
3024 		} else {
3025 			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
3026 			cap_rings->mr_rnum = HXGE_MAX_TDCS;
3027 			cap_rings->mr_rget = hxge_fill_ring;
3028 			cap_rings->mr_gnum = 0;
3029 			cap_rings->mr_gget = NULL;
3030 			cap_rings->mr_gaddring = NULL;
3031 			cap_rings->mr_gremring = NULL;
3032 		}
3033 		MUTEX_EXIT(hxgep->genlock);
3034 		break;
3035 	}
3036 
3037 	default:
3038 		return (B_FALSE);
3039 	}
3040 	return (B_TRUE);
3041 }
3042 
3043 static boolean_t
3044 hxge_param_locked(mac_prop_id_t pr_num)
3045 {
3046 	/*
3047 	 * All adv_* parameters are locked (read-only) while
3048 	 * the device is in any sort of loopback mode ...
3049 	 */
3050 	switch (pr_num) {
3051 		case MAC_PROP_ADV_1000FDX_CAP:
3052 		case MAC_PROP_EN_1000FDX_CAP:
3053 		case MAC_PROP_ADV_1000HDX_CAP:
3054 		case MAC_PROP_EN_1000HDX_CAP:
3055 		case MAC_PROP_ADV_100FDX_CAP:
3056 		case MAC_PROP_EN_100FDX_CAP:
3057 		case MAC_PROP_ADV_100HDX_CAP:
3058 		case MAC_PROP_EN_100HDX_CAP:
3059 		case MAC_PROP_ADV_10FDX_CAP:
3060 		case MAC_PROP_EN_10FDX_CAP:
3061 		case MAC_PROP_ADV_10HDX_CAP:
3062 		case MAC_PROP_EN_10HDX_CAP:
3063 		case MAC_PROP_AUTONEG:
3064 		case MAC_PROP_FLOWCTRL:
3065 			return (B_TRUE);
3066 	}
3067 	return (B_FALSE);
3068 }
3069 
3070 /*
3071  * callback functions for set/get of properties
3072  */
3073 static int
3074 hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3075     uint_t pr_valsize, const void *pr_val)
3076 {
3077 	hxge_t		*hxgep = barg;
3078 	p_hxge_stats_t	statsp;
3079 	int		err = 0;
3080 	uint32_t	new_mtu, old_framesize, new_framesize;
3081 
3082 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "==> hxge_m_setprop"));
3083 
3084 	statsp = hxgep->statsp;
3085 	MUTEX_ENTER(hxgep->genlock);
3086 	if (statsp->port_stats.lb_mode != hxge_lb_normal &&
3087 	    hxge_param_locked(pr_num)) {
3088 		/*
3089 		 * All adv_* parameters are locked (read-only)
3090 		 * while the device is in any sort of loopback mode.
3091 		 */
3092 		HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3093 		    "==> hxge_m_setprop: loopback mode: read only"));
3094 		MUTEX_EXIT(hxgep->genlock);
3095 		return (EBUSY);
3096 	}
3097 
3098 	switch (pr_num) {
3099 		/*
3100 		 * These properties either do not exist or are read-only.
3101 		 */
3102 		case MAC_PROP_EN_1000FDX_CAP:
3103 		case MAC_PROP_EN_100FDX_CAP:
3104 		case MAC_PROP_EN_10FDX_CAP:
3105 		case MAC_PROP_EN_1000HDX_CAP:
3106 		case MAC_PROP_EN_100HDX_CAP:
3107 		case MAC_PROP_EN_10HDX_CAP:
3108 		case MAC_PROP_ADV_1000FDX_CAP:
3109 		case MAC_PROP_ADV_1000HDX_CAP:
3110 		case MAC_PROP_ADV_100FDX_CAP:
3111 		case MAC_PROP_ADV_100HDX_CAP:
3112 		case MAC_PROP_ADV_10FDX_CAP:
3113 		case MAC_PROP_ADV_10HDX_CAP:
3114 		case MAC_PROP_STATUS:
3115 		case MAC_PROP_SPEED:
3116 		case MAC_PROP_DUPLEX:
3117 		case MAC_PROP_AUTONEG:
3118 		/*
3119 		 * Flow control is handled in the shared domain and
3120 		 * it is read-only here.
3121 		 */
3122 		case MAC_PROP_FLOWCTRL:
3123 			err = EINVAL;
3124 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3125 			    "==> hxge_m_setprop:  read only property %d",
3126 			    pr_num));
3127 			break;
3128 
3129 		case MAC_PROP_MTU:
3130 			bcopy(pr_val, &new_mtu, sizeof (new_mtu));
3131 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3132 			    "==> hxge_m_setprop: set MTU: %d", new_mtu));
3133 
3134 			new_framesize = new_mtu + MTU_TO_FRAME_SIZE;
3135 			if (new_framesize == hxgep->vmac.maxframesize) {
3136 				err = 0;
3137 				break;
3138 			}
3139 
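			/*
			 * The frame size can only be changed while the
			 * device is stopped.
			 */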
3140 			if (hxgep->hxge_mac_state == HXGE_MAC_STARTED) {
3141 				err = EBUSY;
3142 				break;
3143 			}
3144 
3145 			if (new_framesize < MIN_FRAME_SIZE ||
3146 			    new_framesize > MAX_FRAME_SIZE) {
3147 				err = EINVAL;
3148 				break;
3149 			}
3150 
3151 			old_framesize = hxgep->vmac.maxframesize;
3152 			hxgep->vmac.maxframesize = (uint16_t)new_framesize;
3153 
3154 			if (hxge_vmac_set_framesize(hxgep)) {
3155 				hxgep->vmac.maxframesize =
3156 				    (uint16_t)old_framesize;
3157 				err = EINVAL;
3158 				break;
3159 			}
3160 
3161 			err = mac_maxsdu_update(hxgep->mach, new_mtu);
3162 			if (err) {
3163 				hxgep->vmac.maxframesize =
3164 				    (uint16_t)old_framesize;
3165 				(void) hxge_vmac_set_framesize(hxgep);
3166 			}
3167 
3168 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3169 			    "==> hxge_m_setprop: set MTU: %d maxframe %d",
3170 			    new_mtu, hxgep->vmac.maxframesize));
3171 			break;
3172 
3173 		case MAC_PROP_PRIVATE:
3174 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3175 			    "==> hxge_m_setprop: private property"));
3176 			err = hxge_set_priv_prop(hxgep, pr_name, pr_valsize,
3177 			    pr_val);
3178 			break;
3179 
3180 		default:
3181 			err = ENOTSUP;
3182 			break;
3183 	}
3184 
3185 	MUTEX_EXIT(hxgep->genlock);
3186 
3187 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3188 	    "<== hxge_m_setprop (return %d)", err));
3189 
3190 	return (err);
3191 }
3192 
3193 /* ARGSUSED */
3194 static int
3195 hxge_get_def_val(hxge_t *hxgep, mac_prop_id_t pr_num, uint_t pr_valsize,
3196     void *pr_val)
3197 {
3198 	int		err = 0;
3199 	link_flowctrl_t	fl;
3200 
3201 	switch (pr_num) {
3202 	case MAC_PROP_DUPLEX:
3203 		*(uint8_t *)pr_val = 2;
3204 		break;
3205 	case MAC_PROP_AUTONEG:
3206 		*(uint8_t *)pr_val = 0;
3207 		break;
3208 	case MAC_PROP_FLOWCTRL:
3209 		if (pr_valsize < sizeof (link_flowctrl_t))
3210 			return (EINVAL);
3211 		fl = LINK_FLOWCTRL_TX;
3212 		bcopy(&fl, pr_val, sizeof (fl));
3213 		break;
3214 	default:
3215 		err = ENOTSUP;
3216 		break;
3217 	}
3218 	return (err);
3219 }
3220 
3221 static int
3222 hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3223     uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
3224 {
3225 	hxge_t 		*hxgep = barg;
3226 	p_hxge_stats_t	statsp = hxgep->statsp;
3227 	int		err = 0;
3228 	link_flowctrl_t fl;
3229 	uint64_t	tmp = 0;
3230 	link_state_t	ls;
3231 
3232 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3233 	    "==> hxge_m_getprop: pr_num %d", pr_num));
3234 
3235 	if (pr_valsize == 0)
3236 		return (EINVAL);
3237 
3238 	*perm = MAC_PROP_PERM_RW;
3239 
3240 	if ((pr_flags & MAC_PROP_DEFAULT) && (pr_num != MAC_PROP_PRIVATE)) {
3241 		err = hxge_get_def_val(hxgep, pr_num, pr_valsize, pr_val);
3242 		return (err);
3243 	}
3244 
3245 	bzero(pr_val, pr_valsize);
3246 	switch (pr_num) {
3247 		case MAC_PROP_DUPLEX:
3248 			*perm = MAC_PROP_PERM_READ;
3249 			*(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
3250 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3251 			    "==> hxge_m_getprop: duplex mode %d",
3252 			    *(uint8_t *)pr_val));
3253 			break;
3254 
3255 		case MAC_PROP_SPEED:
3256 			*perm = MAC_PROP_PERM_READ;
3257 			if (pr_valsize < sizeof (uint64_t))
3258 				return (EINVAL);
3259 			tmp = statsp->mac_stats.link_speed * 1000000ull;
3260 			bcopy(&tmp, pr_val, sizeof (tmp));
3261 			break;
3262 
3263 		case MAC_PROP_STATUS:
3264 			*perm = MAC_PROP_PERM_READ;
3265 			if (pr_valsize < sizeof (link_state_t))
3266 				return (EINVAL);
3267 			if (!statsp->mac_stats.link_up)
3268 				ls = LINK_STATE_DOWN;
3269 			else
3270 				ls = LINK_STATE_UP;
3271 			bcopy(&ls, pr_val, sizeof (ls));
3272 			break;
3273 
3274 		case MAC_PROP_FLOWCTRL:
3275 			/*
3276 			 * Flow control is supported by the shared domain and
3277 			 * it is currently transmit-only.
3278 			 */
3279 			*perm = MAC_PROP_PERM_READ;
3280 			if (pr_valsize < sizeof (link_flowctrl_t))
3281 				return (EINVAL);
3282 			fl = LINK_FLOWCTRL_TX;
3283 			bcopy(&fl, pr_val, sizeof (fl));
3284 			break;
3285 		case MAC_PROP_AUTONEG:
3286 			/* 10G link only and it is not negotiable */
3287 			*perm = MAC_PROP_PERM_READ;
3288 			*(uint8_t *)pr_val = 0;
3289 			break;
3290 		case MAC_PROP_ADV_1000FDX_CAP:
3291 		case MAC_PROP_ADV_100FDX_CAP:
3292 		case MAC_PROP_ADV_10FDX_CAP:
3293 		case MAC_PROP_ADV_1000HDX_CAP:
3294 		case MAC_PROP_ADV_100HDX_CAP:
3295 		case MAC_PROP_ADV_10HDX_CAP:
3296 		case MAC_PROP_EN_1000FDX_CAP:
3297 		case MAC_PROP_EN_100FDX_CAP:
3298 		case MAC_PROP_EN_10FDX_CAP:
3299 		case MAC_PROP_EN_1000HDX_CAP:
3300 		case MAC_PROP_EN_100HDX_CAP:
3301 		case MAC_PROP_EN_10HDX_CAP:
3302 			err = ENOTSUP;
3303 			break;
3304 
3305 		case MAC_PROP_PRIVATE:
3306 			err = hxge_get_priv_prop(hxgep, pr_name, pr_flags,
3307 			    pr_valsize, pr_val);
3308 			break;
3309 		case MAC_PROP_MTU: {
3310 			mac_propval_range_t range;
3311 
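			/*
			 * Only the query for the possible MTU range is
			 * supported; derive the range from the frame size
			 * limits.
			 */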
3312 			if (!(pr_flags & MAC_PROP_POSSIBLE))
3313 				return (ENOTSUP);
3314 			if (pr_valsize < sizeof (mac_propval_range_t))
3315 				return (EINVAL);
3316 			range.mpr_count = 1;
3317 			range.mpr_type = MAC_PROPVAL_UINT32;
3318 			range.range_uint32[0].mpur_min = MIN_FRAME_SIZE -
3319 			    MTU_TO_FRAME_SIZE;
3320 			range.range_uint32[0].mpur_max = MAX_FRAME_SIZE -
3321 			    MTU_TO_FRAME_SIZE;
3322 			bcopy(&range, pr_val, sizeof (range));
3323 			break;
3324 		}
3325 		default:
3326 			err = EINVAL;
3327 			break;
3328 	}
3329 
3330 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "<== hxge_m_getprop"));
3331 
3332 	return (err);
3333 }
3334 
3335 /* ARGSUSED */
3336 static int
3337 hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_valsize,
3338     const void *pr_val)
3339 {
3340 	p_hxge_param_t	param_arr = hxgep->param_arr;
3341 	int		err = 0;
3342 
3343 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3344 	    "==> hxge_set_priv_prop: name %s (value %s)", pr_name, pr_val));
3345 
3346 	if (pr_val == NULL) {
3347 		return (EINVAL);
3348 	}
3349 
3350 	/* Blanking */
3351 	if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3352 		err = hxge_param_rx_intr_time(hxgep, NULL, NULL,
3353 		    (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_time]);
3354 	} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3355 		err = hxge_param_rx_intr_pkts(hxgep, NULL, NULL,
3356 		    (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_pkts]);
3357 
3358 	/* Classification */
3359 	} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3360 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3361 		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3362 	} else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3363 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3364 		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3365 	} else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3366 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3367 		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3368 	} else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3369 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3370 		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3371 	} else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3372 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3373 		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3374 	} else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3375 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3376 		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3377 	} else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3378 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3379 		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3380 	} else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3381 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3382 		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3383 	} else {
3384 		err = EINVAL;
3385 	}
3386 
3387 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3388 	    "<== hxge_set_priv_prop: err %d", err));
3389 
3390 	return (err);
3391 }
3392 
3393 static int
3394 hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_flags,
3395     uint_t pr_valsize, void *pr_val)
3396 {
3397 	p_hxge_param_t	param_arr = hxgep->param_arr;
3398 	char		valstr[MAXNAMELEN];
3399 	int		err = 0;
3400 	uint_t		strsize;
3401 	int		value = 0;
3402 
3403 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3404 	    "==> hxge_get_priv_prop: property %s", pr_name));
3405 
3406 	if (pr_flags & MAC_PROP_DEFAULT) {
3407 		/* Receive Interrupt Blanking Parameters */
3408 		if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3409 			value = RXDMA_RCR_TO_DEFAULT;
3410 		} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3411 			value = RXDMA_RCR_PTHRES_DEFAULT;
3412 
3413 		/* Classification and Load Distribution Configuration */
3414 		} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0 ||
3415 		    strcmp(pr_name, "_class_opt_ipv4_udp") == 0 ||
3416 		    strcmp(pr_name, "_class_opt_ipv4_ah") == 0 ||
3417 		    strcmp(pr_name, "_class_opt_ipv4_sctp") == 0 ||
3418 		    strcmp(pr_name, "_class_opt_ipv6_tcp") == 0 ||
3419 		    strcmp(pr_name, "_class_opt_ipv6_udp") == 0 ||
3420 		    strcmp(pr_name, "_class_opt_ipv6_ah") == 0 ||
3421 		    strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3422 			value = HXGE_CLASS_TCAM_LOOKUP;
3423 		} else {
3424 			err = EINVAL;
3425 		}
3426 	} else {
3427 		/* Receive Interrupt Blanking Parameters */
3428 		if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3429 			value = hxgep->intr_timeout;
3430 		} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3431 			value = hxgep->intr_threshold;
3432 
3433 		/* Classification and Load Distribution Configuration */
3434 		} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3435 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3436 			    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3437 
3438 			value = (int)param_arr[param_class_opt_ipv4_tcp].value;
3439 		} else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3440 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3441 			    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3442 
3443 			value = (int)param_arr[param_class_opt_ipv4_udp].value;
3444 		} else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3445 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3446 			    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3447 
3448 			value = (int)param_arr[param_class_opt_ipv4_ah].value;
3449 		} else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3450 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3451 			    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3452 
3453 			value = (int)param_arr[param_class_opt_ipv4_sctp].value;
3454 		} else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3455 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3456 			    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3457 
3458 			value = (int)param_arr[param_class_opt_ipv6_tcp].value;
3459 		} else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3460 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3461 			    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3462 
3463 			value = (int)param_arr[param_class_opt_ipv6_udp].value;
3464 		} else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3465 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3466 			    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3467 
3468 			value = (int)param_arr[param_class_opt_ipv6_ah].value;
3469 		} else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3470 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3471 			    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3472 
3473 			value = (int)param_arr[param_class_opt_ipv6_sctp].value;
3474 		} else {
3475 			err = EINVAL;
3476 		}
3477 	}
3478 
3479 	if (err == 0) {
3480 		(void) snprintf(valstr, sizeof (valstr), "0x%x", value);
3481 
3482 		strsize = (uint_t)strlen(valstr);
3483 		if (pr_valsize < strsize) {
3484 			err = ENOBUFS;
3485 		} else {
3486 			(void) strlcpy(pr_val, valstr, pr_valsize);
3487 		}
3488 	}
3489 
3490 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3491 	    "<== hxge_get_priv_prop: return %d", err));
3492 
3493 	return (err);
3494 }
3495 /*
3496  * Module loading and removing entry points.
3497  */
3498 DDI_DEFINE_STREAM_OPS(hxge_dev_ops, nulldev, nulldev, hxge_attach, hxge_detach,
3499     nodev, NULL, D_MP, NULL, NULL);
3500 
3501 extern struct mod_ops mod_driverops;
3502 
3503 #define	HXGE_DESC_VER	"HXGE 10Gb Ethernet Driver"
3504 
3505 /*
3506  * Module linkage information for the kernel.
3507  */
3508 static struct modldrv hxge_modldrv = {
3509 	&mod_driverops,
3510 	HXGE_DESC_VER,
3511 	&hxge_dev_ops
3512 };
3513 
3514 static struct modlinkage modlinkage = {
3515 	MODREV_1, (void *) &hxge_modldrv, NULL
3516 };
3517 
3518 int
3519 _init(void)
3520 {
3521 	int status;
3522 
3523 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
3524 	mac_init_ops(&hxge_dev_ops, "hxge");
3525 	status = ddi_soft_state_init(&hxge_list, sizeof (hxge_t), 0);
3526 	if (status != 0) {
3527 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
3528 		    "failed to init device soft state"));
3529 		mac_fini_ops(&hxge_dev_ops);
3530 		goto _init_exit;
3531 	}
3532 
3533 	status = mod_install(&modlinkage);
3534 	if (status != 0) {
3535 		ddi_soft_state_fini(&hxge_list);
3536 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "Mod install failed"));
3537 		goto _init_exit;
3538 	}
3539 
3540 	MUTEX_INIT(&hxge_common_lock, NULL, MUTEX_DRIVER, NULL);
3541 
3542 _init_exit:
3543 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
3544 
3545 	return (status);
3546 }
3547 
3548 int
3549 _fini(void)
3550 {
3551 	int status;
3552 
3553 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
3554 
3555 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
3556 
3557 	if (hxge_mblks_pending)
3558 		return (EBUSY);
3559 
3560 	status = mod_remove(&modlinkage);
3561 	if (status != DDI_SUCCESS) {
3562 		HXGE_DEBUG_MSG((NULL, MOD_CTL,
3563 		    "Module removal failed 0x%08x", status));
3564 		goto _fini_exit;
3565 	}
3566 
3567 	mac_fini_ops(&hxge_dev_ops);
3568 
3569 	ddi_soft_state_fini(&hxge_list);
3570 
3571 	MUTEX_DESTROY(&hxge_common_lock);
3572 
3573 _fini_exit:
3574 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));
3575 
3576 	return (status);
3577 }
3578 
3579 int
3580 _info(struct modinfo *modinfop)
3581 {
3582 	int status;
3583 
3584 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
3585 	status = mod_info(&modlinkage, modinfop);
3586 	HXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
3587 
3588 	return (status);
3589 }
3590 
3591 /*ARGSUSED*/
3592 hxge_status_t
3593 hxge_add_intrs(p_hxge_t hxgep)
3594 {
3595 	int		intr_types;
3596 	int		type = 0;
3597 	int		ddi_status = DDI_SUCCESS;
3598 	hxge_status_t	status = HXGE_OK;
3599 
3600 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs"));
3601 
3602 	hxgep->hxge_intr_type.intr_registered = B_FALSE;
3603 	hxgep->hxge_intr_type.intr_enabled = B_FALSE;
3604 	hxgep->hxge_intr_type.msi_intx_cnt = 0;
3605 	hxgep->hxge_intr_type.intr_added = 0;
3606 	hxgep->hxge_intr_type.niu_msi_enable = B_FALSE;
3607 	hxgep->hxge_intr_type.intr_type = 0;
3608 
3609 	if (hxge_msi_enable) {
3610 		hxgep->hxge_intr_type.niu_msi_enable = B_TRUE;
3611 	}
3612 
3613 	/* Get the supported interrupt types */
3614 	if ((ddi_status = ddi_intr_get_supported_types(hxgep->dip, &intr_types))
3615 	    != DDI_SUCCESS) {
3616 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_intrs: "
3617 		    "ddi_intr_get_supported_types failed: status 0x%08x",
3618 		    ddi_status));
3619 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3620 	}
3621 
3622 	hxgep->hxge_intr_type.intr_types = intr_types;
3623 
3624 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3625 	    "ddi_intr_get_supported_types: 0x%08x", intr_types));
3626 
3627 	/*
3628 	 * Pick the interrupt type to use based on hxge_msi_enable:
3629 	 *	1 - MSI
3630 	 *	2 - MSI-X
3631 	 *	others - FIXED (INTx emulation)
3632 	 */
3633 	switch (hxge_msi_enable) {
3634 	default:
3635 		type = DDI_INTR_TYPE_FIXED;
3636 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3637 		    "use fixed (intx emulation) type %08x", type));
3638 		break;
3639 
3640 	case 2:
3641 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3642 		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
3643 		if (intr_types & DDI_INTR_TYPE_MSIX) {
3644 			type = DDI_INTR_TYPE_MSIX;
3645 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3646 			    "==> hxge_add_intrs: "
3647 			    "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3648 		} else if (intr_types & DDI_INTR_TYPE_MSI) {
3649 			type = DDI_INTR_TYPE_MSI;
3650 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3651 			    "==> hxge_add_intrs: "
3652 			    "ddi_intr_get_supported_types: MSI 0x%08x", type));
3653 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
3654 			type = DDI_INTR_TYPE_FIXED;
3655 			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3656 			    "ddi_intr_get_supported_types: FIXED 0x%08x", type));
3657 		}
3658 		break;
3659 
3660 	case 1:
3661 		if (intr_types & DDI_INTR_TYPE_MSI) {
3662 			type = DDI_INTR_TYPE_MSI;
3663 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3664 			    "==> hxge_add_intrs: "
3665 			    "ddi_intr_get_supported_types: MSI 0x%08x", type));
3666 		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
3667 			type = DDI_INTR_TYPE_MSIX;
3668 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3669 			    "==> hxge_add_intrs: "
3670 			    "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3671 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
3672 			type = DDI_INTR_TYPE_FIXED;
3673 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3674 			    "==> hxge_add_intrs: "
3675 			    "ddi_intr_get_supported_types: FIXED 0x%08x", type));
3676 		}
3677 	}
3678 
3679 	hxgep->hxge_intr_type.intr_type = type;
3680 	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
3681 	    type == DDI_INTR_TYPE_FIXED) &&
3682 	    hxgep->hxge_intr_type.niu_msi_enable) {
3683 		if ((status = hxge_add_intrs_adv(hxgep)) != DDI_SUCCESS) {
3684 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3685 			    " hxge_add_intrs: "
3686 			    " hxge_add_intrs_adv failed: status 0x%08x",
3687 			    status));
3688 			return (status);
3689 		} else {
3690 			HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_intrs: "
3691 			    "interrupts registered : type %d", type));
3692 			hxgep->hxge_intr_type.intr_registered = B_TRUE;
3693 
3694 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
3695 			    "\nAdded advanced hxge add_intr_adv "
3696 			    "intr type 0x%x\n", type));
3697 
3698 			return (status);
3699 		}
3700 	}
3701 
3702 	if (!hxgep->hxge_intr_type.intr_registered) {
3703 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3704 		    "==> hxge_add_intrs: failed to register interrupts"));
3705 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3706 	}
3707 
3708 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs"));
3709 
3710 	return (status);
3711 }
3712 
3713 /*ARGSUSED*/
3714 static hxge_status_t
3715 hxge_add_intrs_adv(p_hxge_t hxgep)
3716 {
3717 	int		intr_type;
3718 	p_hxge_intr_t	intrp;
3719 	hxge_status_t	status;
3720 
3721 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv"));
3722 
3723 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3724 	intr_type = intrp->intr_type;
3725 
3726 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv: type 0x%x",
3727 	    intr_type));
3728 
3729 	switch (intr_type) {
3730 	case DDI_INTR_TYPE_MSI:		/* 0x2 */
3731 	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
3732 		status = hxge_add_intrs_adv_type(hxgep, intr_type);
3733 		break;
3734 
3735 	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
3736 		status = hxge_add_intrs_adv_type_fix(hxgep, intr_type);
3737 		break;
3738 
3739 	default:
3740 		status = HXGE_ERROR;
3741 		break;
3742 	}
3743 
3744 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv"));
3745 
3746 	return (status);
3747 }
3748 
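/*
 * hxge_add_intrs_adv_type():  allocate and register MSI/MSI-X vectors.
 * The request is limited by the "#msix-request" property (PSARC/2007/453)
 * and, for MSI, rounded down to a power of two; one handler is then added
 * for each logical device group.
 */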
3749 /*ARGSUSED*/
3750 static hxge_status_t
3751 hxge_add_intrs_adv_type(p_hxge_t hxgep, uint32_t int_type)
3752 {
3753 	dev_info_t	*dip = hxgep->dip;
3754 	p_hxge_ldg_t	ldgp;
3755 	p_hxge_intr_t	intrp;
3756 	uint_t		*inthandler;
3757 	void		*arg1, *arg2;
3758 	int		behavior;
3759 	int		nintrs, navail;
3760 	int		nactual, nrequired, nrequest;
3761 	int		inum = 0;
3762 	int		loop = 0;
3763 	int		x, y;
3764 	int		ddi_status = DDI_SUCCESS;
3765 	hxge_status_t	status = HXGE_OK;
3766 
3767 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type"));
3768 
3769 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3770 
3771 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3772 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3773 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3774 		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
3775 		    "nintrs: %d", ddi_status, nintrs));
3776 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3777 	}
3778 
3779 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
3780 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
3781 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3782 		    "ddi_intr_get_navail() failed, status: 0x%x, "
3783 		    "navail: %d", ddi_status, navail));
3784 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3785 	}
3786 
3787 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3788 	    "ddi_intr_get_navail() returned: intr type %d nintrs %d, navail %d",
3789 	    int_type, nintrs, navail));
3790 
3791 	/* PSARC/2007/453 MSI-X interrupt limit override */
3792 	if (int_type == DDI_INTR_TYPE_MSIX) {
3793 		nrequest = hxge_create_msi_property(hxgep);
3794 		if (nrequest < navail) {
3795 			navail = nrequest;
3796 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3797 			    "hxge_add_intrs_adv_type: nintrs %d "
3798 			    "navail %d (nrequest %d)",
3799 			    nintrs, navail, nrequest));
3800 		}
3801 	}
3802 
3803 	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
3804 		/* MSI count must be a power of 2; round navail down */
3805 		if ((navail & 16) == 16) {
3806 			navail = 16;
3807 		} else if ((navail & 8) == 8) {
3808 			navail = 8;
3809 		} else if ((navail & 4) == 4) {
3810 			navail = 4;
3811 		} else if ((navail & 2) == 2) {
3812 			navail = 2;
3813 		} else {
3814 			navail = 1;
3815 		}
3816 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3817 		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
3818 		    "navail %d", nintrs, navail));
3819 	}
3820 
3821 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3822 	    "requesting: intr type %d nintrs %d, navail %d",
3823 	    int_type, nintrs, navail));
3824 
3825 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
3826 	    DDI_INTR_ALLOC_NORMAL);
3827 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
3828 	intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP);
3829 
3830 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
3831 	    navail, &nactual, behavior);
3832 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
3833 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3834 		    " ddi_intr_alloc() failed: %d", ddi_status));
3835 		kmem_free(intrp->htable, intrp->intr_size);
3836 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3837 	}
3838 
3839 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3840 	    "ddi_intr_alloc() returned: navail %d nactual %d",
3841 	    navail, nactual));
3842 
3843 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
3844 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
3845 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3846 		    " ddi_intr_get_pri() failed: %d", ddi_status));
3847 		/* Free already allocated interrupts */
3848 		for (y = 0; y < nactual; y++) {
3849 			(void) ddi_intr_free(intrp->htable[y]);
3850 		}
3851 
3852 		kmem_free(intrp->htable, intrp->intr_size);
3853 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3854 	}
3855 
3856 	nrequired = 0;
3857 	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
3858 	if (status != HXGE_OK) {
3859 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3860 		    "hxge_add_intrs_adv_type: hxge_ldgv_init "
3861 		    "failed: 0x%x", status));
3862 		/* Free already allocated interrupts */
3863 		for (y = 0; y < nactual; y++) {
3864 			(void) ddi_intr_free(intrp->htable[y]);
3865 		}
3866 
3867 		kmem_free(intrp->htable, intrp->intr_size);
3868 		return (status);
3869 	}
3870 
3871 	ldgp = hxgep->ldgvp->ldgp;
3872 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3873 	    "After hxge_ldgv_init(): nreq %d nactual %d", nrequired, nactual));
3874 
3875 	if (nactual < nrequired)
3876 		loop = nactual;
3877 	else
3878 		loop = nrequired;
3879 
3880 	for (x = 0; x < loop; x++, ldgp++) {
3881 		ldgp->vector = (uint8_t)x;
3882 		arg1 = ldgp->ldvp;
3883 		arg2 = hxgep;
3884 		if (ldgp->nldvs == 1) {
3885 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
3886 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3887 			    "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
3888 			    "1-1 int handler (entry %d)\n",
3889 			    arg1, arg2, x));
3890 		} else if (ldgp->nldvs > 1) {
3891 			inthandler = (uint_t *)ldgp->sys_intr_handler;
3892 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3893 			    "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
3894 			    "nldevs %d int handler (entry %d)\n",
3895 			    arg1, arg2, ldgp->nldvs, x));
3896 		}
3897 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3898 		    "==> hxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
3899 		    "htable 0x%llx", x, intrp->htable[x]));
3900 
3901 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
3902 		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
3903 		    DDI_SUCCESS) {
3904 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3905 			    "==> hxge_add_intrs_adv_type: failed #%d "
3906 			    "status 0x%x", x, ddi_status));
3907 			for (y = 0; y < intrp->intr_added; y++) {
3908 				(void) ddi_intr_remove_handler(
3909 				    intrp->htable[y]);
3910 			}
3911 
3912 			/* Free already allocated intr */
3913 			for (y = 0; y < nactual; y++) {
3914 				(void) ddi_intr_free(intrp->htable[y]);
3915 			}
3916 			kmem_free(intrp->htable, intrp->intr_size);
3917 
3918 			(void) hxge_ldgv_uninit(hxgep);
3919 
3920 			return (HXGE_ERROR | HXGE_DDI_FAILED);
3921 		}
3922 
3923 		intrp->intr_added++;
3924 	}
3925 	intrp->msi_intx_cnt = nactual;
3926 
3927 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3928 	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
3929 	    navail, nactual, intrp->msi_intx_cnt, intrp->intr_added));
3930 
3931 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
3932 	(void) hxge_intr_ldgv_init(hxgep);
3933 
3934 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type"));
3935 
3936 	return (status);
3937 }
3938 
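/*
 * hxge_add_intrs_adv_type_fix():  allocate and register fixed (INTx)
 * interrupts.  The flow mirrors hxge_add_intrs_adv_type(), but the
 * allocation uses DDI_INTR_ALLOC_STRICT for the fixed type.
 */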
3939 /*ARGSUSED*/
3940 static hxge_status_t
3941 hxge_add_intrs_adv_type_fix(p_hxge_t hxgep, uint32_t int_type)
3942 {
3943 	dev_info_t	*dip = hxgep->dip;
3944 	p_hxge_ldg_t	ldgp;
3945 	p_hxge_intr_t	intrp;
3946 	uint_t		*inthandler;
3947 	void		*arg1, *arg2;
3948 	int		behavior;
3949 	int		nintrs, navail;
3950 	int		nactual, nrequired;
3951 	int		inum = 0;
3952 	int		x, y;
3953 	int		ddi_status = DDI_SUCCESS;
3954 	hxge_status_t	status = HXGE_OK;
3955 
3956 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type_fix"));
3957 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3958 
3959 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3960 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3961 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3962 		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
3963 		    "nintrs: %d", ddi_status, nintrs));
3964 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3965 	}
3966 
3967 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
3968 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
3969 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3970 		    "ddi_intr_get_navail() failed, status: 0x%x, "
3971 		    "navail: %d", ddi_status, navail));
3972 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3973 	}
3974 
3975 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3976 	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
3977 	    nintrs, navail));
3978 
3979 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
3980 	    DDI_INTR_ALLOC_NORMAL);
3981 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
3982 	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
3983 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
3984 	    navail, &nactual, behavior);
3985 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
3986 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3987 		    " ddi_intr_alloc() failed: %d", ddi_status));
3988 		kmem_free(intrp->htable, intrp->intr_size);
3989 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3990 	}
3991 
3992 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
3993 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
3994 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3995 		    " ddi_intr_get_pri() failed: %d", ddi_status));
3996 		/* Free already allocated interrupts */
3997 		for (y = 0; y < nactual; y++) {
3998 			(void) ddi_intr_free(intrp->htable[y]);
3999 		}
4000 
4001 		kmem_free(intrp->htable, intrp->intr_size);
4002 		return (HXGE_ERROR | HXGE_DDI_FAILED);
4003 	}
4004 
4005 	nrequired = 0;
4006 	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
4007 	if (status != HXGE_OK) {
4008 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4009 		    "hxge_add_intrs_adv_type_fix:hxge_ldgv_init "
4010 		    "failed: 0x%x", status));
4011 		/* Free already allocated interrupts */
4012 		for (y = 0; y < nactual; y++) {
4013 			(void) ddi_intr_free(intrp->htable[y]);
4014 		}
4015 
4016 		kmem_free(intrp->htable, intrp->intr_size);
4017 		return (status);
4018 	}
4019 
4020 	ldgp = hxgep->ldgvp->ldgp;
4021 	for (x = 0; x < nrequired; x++, ldgp++) {
4022 		ldgp->vector = (uint8_t)x;
4023 		arg1 = ldgp->ldvp;
4024 		arg2 = hxgep;
4025 		if (ldgp->nldvs == 1) {
4026 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
4027 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
4028 			    "hxge_add_intrs_adv_type_fix: "
4029 			    "1-1 int handler(%d) ldg %d ldv %d "
4030 			    "arg1 $%p arg2 $%p\n",
4031 			    x, ldgp->ldg, ldgp->ldvp->ldv, arg1, arg2));
4032 		} else if (ldgp->nldvs > 1) {
4033 			inthandler = (uint_t *)ldgp->sys_intr_handler;
4034 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
4035 			    "hxge_add_intrs_adv_type_fix: "
4036 			    "shared int handler(%d) nldvs %d ldg %d ldv %d "
4037 			    "arg1 0x%016llx arg2 0x%016llx\n",
4038 			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
4039 			    arg1, arg2));
4040 		}
4041 
4042 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
4043 		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
4044 		    DDI_SUCCESS) {
4045 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4046 			    "==> hxge_add_intrs_adv_type_fix: failed #%d "
4047 			    "status 0x%x", x, ddi_status));
4048 			for (y = 0; y < intrp->intr_added; y++) {
4049 				(void) ddi_intr_remove_handler(
4050 				    intrp->htable[y]);
4051 			}
4052 			for (y = 0; y < nactual; y++) {
4053 				(void) ddi_intr_free(intrp->htable[y]);
4054 			}
4055 			/* Free already allocated intr */
4056 			kmem_free(intrp->htable, intrp->intr_size);
4057 
4058 			(void) hxge_ldgv_uninit(hxgep);
4059 
4060 			return (HXGE_ERROR | HXGE_DDI_FAILED);
4061 		}
4062 		intrp->intr_added++;
4063 	}
4064 
4065 	intrp->msi_intx_cnt = nactual;
4066 
4067 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
4068 
4069 	status = hxge_intr_ldgv_init(hxgep);
4070 
4071 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type_fix"));
4072 
4073 	return (status);
4074 }
4075 
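/*
 * hxge_remove_intrs():  disable, remove and free every interrupt added by
 * hxge_add_intrs() and tear down the logical device groups.
 */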
4076 /*ARGSUSED*/
4077 static void
4078 hxge_remove_intrs(p_hxge_t hxgep)
4079 {
4080 	int		i, inum;
4081 	p_hxge_intr_t	intrp;
4082 
4083 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs"));
4084 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4085 	if (!intrp->intr_registered) {
4086 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
4087 		    "<== hxge_remove_intrs: interrupts not registered"));
4088 		return;
4089 	}
4090 
4091 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs:advanced"));
4092 
4093 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4094 		(void) ddi_intr_block_disable(intrp->htable,
4095 		    intrp->intr_added);
4096 	} else {
4097 		for (i = 0; i < intrp->intr_added; i++) {
4098 			(void) ddi_intr_disable(intrp->htable[i]);
4099 		}
4100 	}
4101 
4102 	for (inum = 0; inum < intrp->intr_added; inum++) {
4103 		if (intrp->htable[inum]) {
4104 			(void) ddi_intr_remove_handler(intrp->htable[inum]);
4105 		}
4106 	}
4107 
4108 	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
4109 		if (intrp->htable[inum]) {
4110 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
4111 			    "hxge_remove_intrs: ddi_intr_free inum %d "
4112 			    "msi_intx_cnt %d intr_added %d",
4113 			    inum, intrp->msi_intx_cnt, intrp->intr_added));
4114 
4115 			(void) ddi_intr_free(intrp->htable[inum]);
4116 		}
4117 	}
4118 
4119 	kmem_free(intrp->htable, intrp->intr_size);
4120 	intrp->intr_registered = B_FALSE;
4121 	intrp->intr_enabled = B_FALSE;
4122 	intrp->msi_intx_cnt = 0;
4123 	intrp->intr_added = 0;
4124 
4125 	(void) hxge_ldgv_uninit(hxgep);
4126 
4127 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_intrs"));
4128 }
4129 
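/*
 * hxge_intrs_enable():  enable all registered interrupts, using block
 * enable when the DDI_INTR_FLAG_BLOCK capability is present.
 */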
4130 /*ARGSUSED*/
4131 void
4132 hxge_intrs_enable(p_hxge_t hxgep)
4133 {
4134 	p_hxge_intr_t	intrp;
4135 	int		i;
4136 	int		status;
4137 
4138 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable"));
4139 
4140 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4141 
4142 	if (!intrp->intr_registered) {
4143 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_intrs_enable: "
4144 		    "interrupts are not registered"));
4145 		return;
4146 	}
4147 
4148 	if (intrp->intr_enabled) {
4149 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
4150 		    "<== hxge_intrs_enable: already enabled"));
4151 		return;
4152 	}
4153 
4154 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4155 		status = ddi_intr_block_enable(intrp->htable,
4156 		    intrp->intr_added);
4157 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
4158 		    "block enable - status 0x%x total inums #%d\n",
4159 		    status, intrp->intr_added));
4160 	} else {
4161 		for (i = 0; i < intrp->intr_added; i++) {
4162 			status = ddi_intr_enable(intrp->htable[i]);
4163 			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
4164 			    "ddi_intr_enable:enable - status 0x%x "
4165 			    "total inums %d enable inum #%d\n",
4166 			    status, intrp->intr_added, i));
4167 			if (status == DDI_SUCCESS) {
4168 				intrp->intr_enabled = B_TRUE;
4169 			}
4170 		}
4171 	}
4172 
4173 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_enable"));
4174 }
4175 
4176 /*ARGSUSED*/
4177 static void
4178 hxge_intrs_disable(p_hxge_t hxgep)
4179 {
4180 	p_hxge_intr_t	intrp;
4181 	int		i;
4182 
4183 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_disable"));
4184 
4185 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4186 
4187 	if (!intrp->intr_registered) {
4188 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable: "
4189 		    "interrupts are not registered"));
4190 		return;
4191 	}
4192 
4193 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4194 		(void) ddi_intr_block_disable(intrp->htable,
4195 		    intrp->intr_added);
4196 	} else {
4197 		for (i = 0; i < intrp->intr_added; i++) {
4198 			(void) ddi_intr_disable(intrp->htable[i]);
4199 		}
4200 	}
4201 
4202 	intrp->intr_enabled = B_FALSE;
4203 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable"));
4204 }
4205 
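/*
 * hxge_mac_register():  register this instance with the GLDv3 MAC layer,
 * exporting its MAC address, callback vector, SDU limits and private
 * properties.
 */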
4206 static hxge_status_t
4207 hxge_mac_register(p_hxge_t hxgep)
4208 {
4209 	mac_register_t	*macp;
4210 	int		status;
4211 
4212 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_mac_register"));
4213 
4214 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
4215 		return (HXGE_ERROR);
4216 
4217 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
4218 	macp->m_driver = hxgep;
4219 	macp->m_dip = hxgep->dip;
4220 	macp->m_src_addr = hxgep->ouraddr.ether_addr_octet;
4221 	macp->m_callbacks = &hxge_m_callbacks;
4222 	macp->m_min_sdu = 0;
4223 	macp->m_max_sdu = hxgep->vmac.maxframesize - MTU_TO_FRAME_SIZE;
4224 	macp->m_margin = VLAN_TAGSZ;
4225 	macp->m_priv_props = hxge_priv_props;
4226 	macp->m_priv_prop_count = HXGE_MAX_PRIV_PROPS;
4227 	macp->m_v12n = MAC_VIRT_LEVEL1;
4228 
4229 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
4230 	    "hxge_mac_register: ether addr is %x:%x:%x:%x:%x:%x",
4231 	    macp->m_src_addr[0],
4232 	    macp->m_src_addr[1],
4233 	    macp->m_src_addr[2],
4234 	    macp->m_src_addr[3],
4235 	    macp->m_src_addr[4],
4236 	    macp->m_src_addr[5]));
4237 
4238 	status = mac_register(macp, &hxgep->mach);
4239 	mac_free(macp);
4240 
4241 	if (status != 0) {
4242 		cmn_err(CE_WARN,
4243 		    "hxge_mac_register failed (status %d instance %d)",
4244 		    status, hxgep->instance);
4245 		return (HXGE_ERROR);
4246 	}
4247 
4248 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_mac_register success "
4249 	    "(instance %d)", hxgep->instance));
4250 
4251 	return (HXGE_OK);
4252 }
4253 
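/*
 * hxge_init_common_dev():  link this instance to the per-Hydra hardware
 * list entry sharing its parent dip, creating the entry and its
 * configuration locks when this is the first device on that Hydra.
 */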
4254 static int
4255 hxge_init_common_dev(p_hxge_t hxgep)
4256 {
4257 	p_hxge_hw_list_t	hw_p;
4258 	dev_info_t		*p_dip;
4259 
4260 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_init_common_dev"));
4261 
4262 	p_dip = hxgep->p_dip;
4263 	MUTEX_ENTER(&hxge_common_lock);
4264 
4265 	/*
4266 	 * Loop through existing per Hydra hardware list.
4267 	 */
4268 	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
4269 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4270 		    "==> hxge_init_common_dev: hw_p $%p parent dip $%p",
4271 		    hw_p, p_dip));
4272 		if (hw_p->parent_devp == p_dip) {
4273 			hxgep->hxge_hw_p = hw_p;
4274 			hw_p->ndevs++;
4275 			hw_p->hxge_p = hxgep;
4276 			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4277 			    "==> hxge_init_common_device: "
4278 			    "hw_p $%p parent dip $%p ndevs %d (found)",
4279 			    hw_p, p_dip, hw_p->ndevs));
4280 			break;
4281 		}
4282 	}
4283 
4284 	if (hw_p == NULL) {
4285 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4286 		    "==> hxge_init_common_dev: parent dip $%p (new)", p_dip));
4287 		hw_p = kmem_zalloc(sizeof (hxge_hw_list_t), KM_SLEEP);
4288 		hw_p->parent_devp = p_dip;
4289 		hw_p->magic = HXGE_MAGIC;
4290 		hxgep->hxge_hw_p = hw_p;
4291 		hw_p->ndevs++;
4292 		hw_p->hxge_p = hxgep;
4293 		hw_p->next = hxge_hw_list;
4294 
4295 		MUTEX_INIT(&hw_p->hxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
4296 		MUTEX_INIT(&hw_p->hxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
4297 		MUTEX_INIT(&hw_p->hxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
4298 
4299 		hxge_hw_list = hw_p;
4300 	}
4301 	MUTEX_EXIT(&hxge_common_lock);
4302 	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4303 	    "==> hxge_init_common_dev (hxge_hw_list) $%p", hxge_hw_list));
4304 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_init_common_dev"));
4305 
4306 	return (HXGE_OK);
4307 }
4308 
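/*
 * hxge_uninit_common_dev():  unlink this instance from the shared hardware
 * list entry and free the entry once its last device is gone.
 */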
4309 static void
4310 hxge_uninit_common_dev(p_hxge_t hxgep)
4311 {
4312 	p_hxge_hw_list_t	hw_p, h_hw_p;
4313 	dev_info_t		*p_dip;
4314 
4315 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_uninit_common_dev"));
4316 	if (hxgep->hxge_hw_p == NULL) {
4317 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4318 		    "<== hxge_uninit_common_dev (no common)"));
4319 		return;
4320 	}
4321 
4322 	MUTEX_ENTER(&hxge_common_lock);
4323 	h_hw_p = hxge_hw_list;
4324 	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
4325 		p_dip = hw_p->parent_devp;
4326 		if (hxgep->hxge_hw_p == hw_p && p_dip == hxgep->p_dip &&
4327 		    hxgep->hxge_hw_p->magic == HXGE_MAGIC &&
4328 		    hw_p->magic == HXGE_MAGIC) {
4329 			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4330 			    "==> hxge_uninit_common_dev: "
4331 			    "hw_p $%p parent dip $%p ndevs %d (found)",
4332 			    hw_p, p_dip, hw_p->ndevs));
4333 
4334 			hxgep->hxge_hw_p = NULL;
4335 			if (hw_p->ndevs) {
4336 				hw_p->ndevs--;
4337 			}
4338 			hw_p->hxge_p = NULL;
4339 			if (!hw_p->ndevs) {
4340 				MUTEX_DESTROY(&hw_p->hxge_vlan_lock);
4341 				MUTEX_DESTROY(&hw_p->hxge_tcam_lock);
4342 				MUTEX_DESTROY(&hw_p->hxge_cfg_lock);
4343 				HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4344 				    "==> hxge_uninit_common_dev: "
4345 				    "hw_p $%p parent dip $%p ndevs %d (last)",
4346 				    hw_p, p_dip, hw_p->ndevs));
4347 
4348 				if (hw_p == hxge_hw_list) {
4349 					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4350 					    "==> hxge_uninit_common_dev:"
4351 					    "remove head "
4352 					    "hw_p $%p parent dip $%p "
4353 					    "ndevs %d (head)",
4354 					    hw_p, p_dip, hw_p->ndevs));
4355 					hxge_hw_list = hw_p->next;
4356 				} else {
4357 					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4358 					    "==> hxge_uninit_common_dev:"
4359 					    "remove middle "
4360 					    "hw_p $%p parent dip $%p "
4361 					    "ndevs %d (middle)",
4362 					    hw_p, p_dip, hw_p->ndevs));
4363 					h_hw_p->next = hw_p->next;
4364 				}
4365 
4366 				KMEM_FREE(hw_p, sizeof (hxge_hw_list_t));
4367 			}
4368 			break;
4369 		} else {
4370 			h_hw_p = hw_p;
4371 		}
4372 	}
4373 
4374 	MUTEX_EXIT(&hxge_common_lock);
4375 	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4376 	    "==> hxge_uninit_common_dev (hxge_hw_list) $%p", hxge_hw_list));
4377 
4378 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_uninit_common_dev"));
4379 }
4380 
4381 #define	HXGE_MSIX_ENTRIES		32
4382 #define	HXGE_MSIX_WAIT_COUNT		10
4383 #define	HXGE_MSIX_PARITY_CHECK_COUNT	30
4384 
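/*
 * hxge_link_poll():  periodic timeout that reads CIP_LINK_STAT, reports
 * link state changes to the MAC layer and, every
 * HXGE_MSIX_PARITY_CHECK_COUNT invocations, checks one MSI-X table entry
 * for parity corruption.
 */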
4385 static void
4386 hxge_link_poll(void *arg)
4387 {
4388 	p_hxge_t		hxgep = (p_hxge_t)arg;
4389 	hpi_handle_t		handle;
4390 	cip_link_stat_t		link_stat;
4391 	hxge_timeout		*to = &hxgep->timeout;
4392 
4393 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
4394 	HXGE_REG_RD32(handle, CIP_LINK_STAT, &link_stat.value);
4395 
4396 	if (to->report_link_status ||
4397 	    (to->link_status != link_stat.bits.xpcs0_link_up)) {
4398 		to->link_status = link_stat.bits.xpcs0_link_up;
4399 		to->report_link_status = B_FALSE;
4400 
4401 		if (link_stat.bits.xpcs0_link_up) {
4402 			hxge_link_update(hxgep, LINK_STATE_UP);
4403 		} else {
4404 			hxge_link_update(hxgep, LINK_STATE_DOWN);
4405 		}
4406 	}
4407 
4408 	if (hxgep->msix_count++ >= HXGE_MSIX_PARITY_CHECK_COUNT) {
4409 		hxgep->msix_count = 0;
4410 		hxgep->msix_index++;
4411 		if (hxgep->msix_index >= HXGE_MSIX_ENTRIES)
4412 			hxgep->msix_index = 0;
4413 		hxge_check_1entry_msix_table(hxgep, hxgep->msix_index);
4414 	}
4415 
4416 	/* Restart the link status timer to check the link status */
4417 	MUTEX_ENTER(&to->lock);
4418 	to->id = timeout(hxge_link_poll, arg, to->ticks);
4419 	MUTEX_EXIT(&to->lock);
4420 }
4421 
4422 static void
4423 hxge_link_update(p_hxge_t hxgep, link_state_t state)
4424 {
4425 	p_hxge_stats_t		statsp = (p_hxge_stats_t)hxgep->statsp;
4426 
4427 	mac_link_update(hxgep->mach, state);
4428 	if (state == LINK_STATE_UP) {
4429 		statsp->mac_stats.link_speed = 10000;
4430 		statsp->mac_stats.link_duplex = 2;
4431 		statsp->mac_stats.link_up = 1;
4432 	} else {
4433 		statsp->mac_stats.link_speed = 0;
4434 		statsp->mac_stats.link_duplex = 0;
4435 		statsp->mac_stats.link_up = 0;
4436 	}
4437 }
4438 
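/*
 * hxge_msix_init():  write a distinct pattern to each MSI-X table entry
 * through the MSI-X BAR, then read every entry back.
 */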
4439 static void
4440 hxge_msix_init(p_hxge_t hxgep)
4441 {
4442 	uint32_t 		data0;
4443 	uint32_t 		data1;
4444 	uint32_t 		data2;
4445 	int			i;
4446 	uint32_t		msix_entry0;
4447 	uint32_t		msix_entry1;
4448 	uint32_t		msix_entry2;
4449 	uint32_t		msix_entry3;
4450 
4451 	/* Change to use MSIx bar instead of indirect access */
4452 	for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
4453 		data0 = 0xffffffff - i;
4454 		data1 = 0xffffffff - i - 1;
4455 		data2 = 0xffffffff - i - 2;
4456 
4457 		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16, data0);
4458 		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 4, data1);
4459 		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 8, data2);
4460 	}
4461 
4462 	/* Read back each entry to initialize the RAM data out buffer. */
4463 	for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
4464 		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16, &msix_entry0);
4465 		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 4, &msix_entry1);
4466 		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 8, &msix_entry2);
4467 		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 12, &msix_entry3);
4468 	}
4469 }
4470 
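/*
 * hxge_store_msix_table():  save a copy of the MSI-X table so later
 * parity checks can compare against it.
 */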
4471 static void
4472 hxge_store_msix_table(p_hxge_t hxgep)
4473 {
4474 	int			i;
4475 	uint32_t		msix_entry0;
4476 	uint32_t		msix_entry1;
4477 	uint32_t		msix_entry2;
4478 
4479 	for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
4480 		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16, &msix_entry0);
4481 		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 4,
4482 		    &msix_entry1);
4483 		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 8,
4484 		    &msix_entry2);
4485 
4486 		hxgep->msix_table[i][0] = msix_entry0;
4487 		hxgep->msix_table[i][1] = msix_entry1;
4488 		hxgep->msix_table[i][2] = msix_entry2;
4489 	}
4490 }
4491 
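/*
 * hxge_check_1entry_msix_table():  compare one MSI-X table entry with the
 * stored copy and post an FMA ereport on the first mismatch.
 */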
4492 static void
4493 hxge_check_1entry_msix_table(p_hxge_t hxgep, int i)
4494 {
4495 	uint32_t		msix_entry0;
4496 	uint32_t		msix_entry1;
4497 	uint32_t		msix_entry2;
4498 	p_hxge_peu_sys_stats_t	statsp;
4499 
4500 	statsp = (p_hxge_peu_sys_stats_t)&hxgep->statsp->peu_sys_stats;
4501 
4502 	HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16, &msix_entry0);
4503 	HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 4, &msix_entry1);
4504 	HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 8, &msix_entry2);
4505 
4506 	hxgep->msix_table_check[i][0] = msix_entry0;
4507 	hxgep->msix_table_check[i][1] = msix_entry1;
4508 	hxgep->msix_table_check[i][2] = msix_entry2;
4509 
4510 	if ((hxgep->msix_table[i][0] != hxgep->msix_table_check[i][0]) ||
4511 	    (hxgep->msix_table[i][1] != hxgep->msix_table_check[i][1]) ||
4512 	    (hxgep->msix_table[i][2] != hxgep->msix_table_check[i][2])) {
4513 		statsp->eic_msix_parerr++;
4514 		if (statsp->eic_msix_parerr == 1) {
4515 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4516 			    "==> hxge_check_1entry_msix_table: "
4517 			    "eic_msix_parerr at index: %d", i));
4518 			HXGE_FM_REPORT_ERROR(hxgep, NULL,
4519 			    HXGE_FM_EREPORT_PEU_ERR);
4520 		}
4521 	}
4522 }
4523 
4524 /*
4525  * The following function is to support
4526  * PSARC/2007/453 MSI-X interrupt limit override.
4527  */
4528 static int
4529 hxge_create_msi_property(p_hxge_t hxgep)
4530 {
4531 	int	nmsi;
4532 	extern	int ncpus;
4533 
4534 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==>hxge_create_msi_property"));
4535 
4536 	(void) ddi_prop_create(DDI_DEV_T_NONE, hxgep->dip,
4537 	    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
4538 	/*
4539 	 * The maximum MSI-X requested will be 8.
4540 	 * If the # of CPUs is less than 8, we will request
4541 	 * as many MSI-X interrupts as there are CPUs.
4542 	 */
4543 	if (ncpus >= HXGE_MSIX_REQUEST_10G) {
4544 		nmsi = HXGE_MSIX_REQUEST_10G;
4545 	} else {
4546 		nmsi = ncpus;
4547 	}
4548 
4549 	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4550 	    "==>hxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
4551 	    ddi_prop_exists(DDI_DEV_T_NONE, hxgep->dip,
4552 	    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
4553 
4554 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<==hxge_create_msi_property"));
4555 	return (nmsi);
4556 }
4557