xref: /titanic_50/usr/src/uts/common/io/hxge/hxge_main.c (revision ead1f93ee620d7580f7e53350fe5a884fc4f158a)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * SunOS MT STREAMS Hydra 10Gb Ethernet Device Driver.
28  */
29 #include <hxge_impl.h>
30 #include <hxge_pfc.h>
31 
32 /*
33  * PSARC/2007/453 MSI-X interrupt limit override
34  * (This PSARC case is limited to MSI-X vectors
35  *  and SPARC platforms only).
36  */
37 uint32_t hxge_msi_enable = 2;
38 
39 /*
40  * Globals: tunable parameters (/etc/system or adb)
41  *
42  */
43 uint32_t hxge_rbr_size = HXGE_RBR_RBB_DEFAULT;
44 uint32_t hxge_rbr_spare_size = 0;
45 uint32_t hxge_rcr_size = HXGE_RCR_DEFAULT;
46 uint32_t hxge_tx_ring_size = HXGE_TX_RING_DEFAULT;
47 uint32_t hxge_bcopy_thresh = TX_BCOPY_MAX;
48 uint32_t hxge_dvma_thresh = TX_FASTDVMA_MIN;
49 uint32_t hxge_dma_stream_thresh = TX_STREAM_MIN;
50 uint32_t hxge_jumbo_frame_size = MAX_FRAME_SIZE;
51 
52 static hxge_os_mutex_t hxgedebuglock;
53 static int hxge_debug_init = 0;
54 
55 /*
56  * Debugging flags:
57  *		hxge_no_tx_lb : set non-zero to disable transmit load balancing
58  *		hxge_tx_lb_policy: 0 - TCP/UDP port (default)
59  *				   1 - From the Stack
60  *				   2 - Destination IP Address
61  */
62 uint32_t hxge_no_tx_lb = 0;
63 uint32_t hxge_tx_lb_policy = HXGE_TX_LB_TCPUDP;
64 
65 /*
66  * Tunables to manage the receive buffer blocks.
67  *
68  * hxge_rx_threshold_hi: copy all buffers.
69  * hxge_rx_buf_size_type: receive buffer block size type.
70  * hxge_rx_threshold_lo: copy only up to tunable block size type.
71  */
72 #if defined(__sparc)
73 hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_6;
74 hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_4;
75 #else
76 hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_NONE;
77 hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_NONE;
78 #endif
79 hxge_rxbuf_type_t hxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
80 
81 rtrace_t hpi_rtracebuf;
82 
83 /*
84  * Function Prototypes
85  */
86 static int hxge_attach(dev_info_t *, ddi_attach_cmd_t);
87 static int hxge_detach(dev_info_t *, ddi_detach_cmd_t);
88 static void hxge_unattach(p_hxge_t);
89 
90 static hxge_status_t hxge_setup_system_dma_pages(p_hxge_t);
91 
92 static hxge_status_t hxge_setup_mutexes(p_hxge_t);
93 static void hxge_destroy_mutexes(p_hxge_t);
94 
95 static hxge_status_t hxge_map_regs(p_hxge_t hxgep);
96 static void hxge_unmap_regs(p_hxge_t hxgep);
97 
98 static hxge_status_t hxge_add_intrs(p_hxge_t hxgep);
99 static void hxge_remove_intrs(p_hxge_t hxgep);
100 static hxge_status_t hxge_add_intrs_adv(p_hxge_t hxgep);
101 static hxge_status_t hxge_add_intrs_adv_type(p_hxge_t, uint32_t);
102 static hxge_status_t hxge_add_intrs_adv_type_fix(p_hxge_t, uint32_t);
103 static void hxge_intrs_enable(p_hxge_t hxgep);
104 static void hxge_intrs_disable(p_hxge_t hxgep);
105 static void hxge_suspend(p_hxge_t);
106 static hxge_status_t hxge_resume(p_hxge_t);
107 static hxge_status_t hxge_setup_dev(p_hxge_t);
108 static void hxge_destroy_dev(p_hxge_t);
109 static hxge_status_t hxge_alloc_mem_pool(p_hxge_t);
110 static void hxge_free_mem_pool(p_hxge_t);
111 static hxge_status_t hxge_alloc_rx_mem_pool(p_hxge_t);
112 static void hxge_free_rx_mem_pool(p_hxge_t);
113 static hxge_status_t hxge_alloc_tx_mem_pool(p_hxge_t);
114 static void hxge_free_tx_mem_pool(p_hxge_t);
115 static hxge_status_t hxge_dma_mem_alloc(p_hxge_t, dma_method_t,
116     struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
117     p_hxge_dma_common_t);
118 static void hxge_dma_mem_free(p_hxge_dma_common_t);
119 static hxge_status_t hxge_alloc_rx_buf_dma(p_hxge_t, uint16_t,
120     p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
121 static void hxge_free_rx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
122 static hxge_status_t hxge_alloc_rx_cntl_dma(p_hxge_t, uint16_t,
123     p_hxge_dma_common_t *, struct ddi_dma_attr *, size_t);
124 static void hxge_free_rx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
125 static hxge_status_t hxge_alloc_tx_buf_dma(p_hxge_t, uint16_t,
126     p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
127 static void hxge_free_tx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
128 static hxge_status_t hxge_alloc_tx_cntl_dma(p_hxge_t, uint16_t,
129     p_hxge_dma_common_t *, size_t);
130 static void hxge_free_tx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
131 static int hxge_init_common_dev(p_hxge_t);
132 static void hxge_uninit_common_dev(p_hxge_t);
133 
134 /*
135  * The next declarations are for the GLDv3 interface.
136  */
137 static int hxge_m_start(void *);
138 static void hxge_m_stop(void *);
139 static int hxge_m_multicst(void *, boolean_t, const uint8_t *);
140 static int hxge_m_promisc(void *, boolean_t);
141 static void hxge_m_ioctl(void *, queue_t *, mblk_t *);
142 static hxge_status_t hxge_mac_register(p_hxge_t hxgep);
143 
144 static boolean_t hxge_m_getcapab(void *, mac_capab_t, void *);
145 static boolean_t hxge_param_locked(mac_prop_id_t pr_num);
146 static int hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
147     uint_t pr_valsize, const void *pr_val);
148 static int hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
149     uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *);
150 static int hxge_get_def_val(hxge_t *hxgep, mac_prop_id_t pr_num,
151     uint_t pr_valsize, void *pr_val);
152 static int hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name,
153     uint_t pr_valsize, const void *pr_val);
154 static int hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name,
155     uint_t pr_flags, uint_t pr_valsize, void *pr_val);
156 static void hxge_link_poll(void *arg);
157 static void hxge_link_update(p_hxge_t hxge, link_state_t state);
158 static void hxge_msix_init(p_hxge_t hxgep);
159 
160 mac_priv_prop_t hxge_priv_props[] = {
161 	{"_rxdma_intr_time", MAC_PROP_PERM_RW},
162 	{"_rxdma_intr_pkts", MAC_PROP_PERM_RW},
163 	{"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW},
164 	{"_class_opt_ipv4_udp", MAC_PROP_PERM_RW},
165 	{"_class_opt_ipv4_ah", MAC_PROP_PERM_RW},
166 	{"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW},
167 	{"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW},
168 	{"_class_opt_ipv6_udp", MAC_PROP_PERM_RW},
169 	{"_class_opt_ipv6_ah", MAC_PROP_PERM_RW},
170 	{"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW}
171 };
172 
173 #define	HXGE_MAX_PRIV_PROPS	\
174 	(sizeof (hxge_priv_props)/sizeof (mac_priv_prop_t))
175 
176 #define	HXGE_MAGIC	0x4E584745UL
177 #define	MAX_DUMP_SZ 256
178 
179 #define	HXGE_M_CALLBACK_FLAGS	\
180 	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)
181 
182 extern hxge_status_t hxge_pfc_set_default_mac_addr(p_hxge_t hxgep);
183 
184 static mac_callbacks_t hxge_m_callbacks = {
185 	HXGE_M_CALLBACK_FLAGS,
186 	hxge_m_stat,
187 	hxge_m_start,
188 	hxge_m_stop,
189 	hxge_m_promisc,
190 	hxge_m_multicst,
191 	NULL,
192 	NULL,
193 	hxge_m_ioctl,
194 	hxge_m_getcapab,
195 	NULL,
196 	NULL,
197 	hxge_m_setprop,
198 	hxge_m_getprop
199 };
200 
201 /* PSARC/2007/453 MSI-X interrupt limit override. */
202 #define	HXGE_MSIX_REQUEST_10G	8
203 static int hxge_create_msi_property(p_hxge_t);
204 
205 /* Enable debug messages as necessary. */
206 uint64_t hxge_debug_level = 0;
207 
208 /*
209  * This list contains the instance structures for the Hydra
210  * devices present in the system. The lock exists to guarantee
211  * mutually exclusive access to the list.
212  */
213 void *hxge_list = NULL;
214 void *hxge_hw_list = NULL;
215 hxge_os_mutex_t hxge_common_lock;
216 
217 extern uint64_t hpi_debug_level;
218 
219 extern hxge_status_t hxge_ldgv_init();
220 extern hxge_status_t hxge_ldgv_uninit();
221 extern hxge_status_t hxge_intr_ldgv_init();
222 extern void hxge_fm_init(p_hxge_t hxgep, ddi_device_acc_attr_t *reg_attr,
223     ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr);
224 extern void hxge_fm_fini(p_hxge_t hxgep);
225 
226 /*
227  * Count of the buffers currently in use by Hydra instances and loaned
228  * up to the upper layers.
229  */
230 uint32_t hxge_mblks_pending = 0;
231 
232 /*
233  * Device register access attributes for PIO.
234  */
235 static ddi_device_acc_attr_t hxge_dev_reg_acc_attr = {
236 	DDI_DEVICE_ATTR_V0,
237 	DDI_STRUCTURE_LE_ACC,
238 	DDI_STRICTORDER_ACC,
239 };
240 
241 /*
242  * Device descriptor access attributes for DMA.
243  */
244 static ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr = {
245 	DDI_DEVICE_ATTR_V0,
246 	DDI_STRUCTURE_LE_ACC,
247 	DDI_STRICTORDER_ACC
248 };
249 
250 /*
251  * Device buffer access attributes for DMA.
252  */
253 static ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr = {
254 	DDI_DEVICE_ATTR_V0,
255 	DDI_STRUCTURE_BE_ACC,
256 	DDI_STRICTORDER_ACC
257 };
258 
259 ddi_dma_attr_t hxge_rx_rcr_desc_dma_attr = {
260 	DMA_ATTR_V0,		/* version number. */
261 	0,			/* low address */
262 	0xffffffffffffffff,	/* high address */
263 	0xffffffffffffffff,	/* address counter max */
264 	0x80000,		/* alignment */
265 	0xfc00fc,		/* dlim_burstsizes */
266 	0x1,			/* minimum transfer size */
267 	0xffffffffffffffff,	/* maximum transfer size */
268 	0xffffffffffffffff,	/* maximum segment size */
269 	1,			/* scatter/gather list length */
270 	(unsigned int)1,	/* granularity */
271 	0			/* attribute flags */
272 };
273 
274 ddi_dma_attr_t hxge_tx_desc_dma_attr = {
275 	DMA_ATTR_V0,		/* version number. */
276 	0,			/* low address */
277 	0xffffffffffffffff,	/* high address */
278 	0xffffffffffffffff,	/* address counter max */
279 	0x100000,		/* alignment */
280 	0xfc00fc,		/* dlim_burstsizes */
281 	0x1,			/* minimum transfer size */
282 	0xffffffffffffffff,	/* maximum transfer size */
283 	0xffffffffffffffff,	/* maximum segment size */
284 	1,			/* scatter/gather list length */
285 	(unsigned int)1,	/* granularity */
286 	0			/* attribute flags */
287 };
288 
289 ddi_dma_attr_t hxge_rx_rbr_desc_dma_attr = {
290 	DMA_ATTR_V0,		/* version number. */
291 	0,			/* low address */
292 	0xffffffffffffffff,	/* high address */
293 	0xffffffffffffffff,	/* address counter max */
294 	0x40000,		/* alignment */
295 	0xfc00fc,		/* dlim_burstsizes */
296 	0x1,			/* minimum transfer size */
297 	0xffffffffffffffff,	/* maximum transfer size */
298 	0xffffffffffffffff,	/* maximum segment size */
299 	1,			/* scatter/gather list length */
300 	(unsigned int)1,	/* granularity */
301 	0			/* attribute flags */
302 };
303 
304 ddi_dma_attr_t hxge_rx_mbox_dma_attr = {
305 	DMA_ATTR_V0,		/* version number. */
306 	0,			/* low address */
307 	0xffffffffffffffff,	/* high address */
308 	0xffffffffffffffff,	/* address counter max */
309 #if defined(_BIG_ENDIAN)
310 	0x2000,			/* alignment */
311 #else
312 	0x1000,			/* alignment */
313 #endif
314 	0xfc00fc,		/* dlim_burstsizes */
315 	0x1,			/* minimum transfer size */
316 	0xffffffffffffffff,	/* maximum transfer size */
317 	0xffffffffffffffff,	/* maximum segment size */
318 	5,			/* scatter/gather list length */
319 	(unsigned int)1,	/* granularity */
320 	0			/* attribute flags */
321 };
322 
323 ddi_dma_attr_t hxge_tx_dma_attr = {
324 	DMA_ATTR_V0,		/* version number. */
325 	0,			/* low address */
326 	0xffffffffffffffff,	/* high address */
327 	0xffffffffffffffff,	/* address counter max */
328 #if defined(_BIG_ENDIAN)
329 	0x2000,			/* alignment */
330 #else
331 	0x1000,			/* alignment */
332 #endif
333 	0xfc00fc,		/* dlim_burstsizes */
334 	0x1,			/* minimum transfer size */
335 	0xffffffffffffffff,	/* maximum transfer size */
336 	0xffffffffffffffff,	/* maximum segment size */
337 	5,			/* scatter/gather list length */
338 	(unsigned int)1,	/* granularity */
339 	0			/* attribute flags */
340 };
341 
342 ddi_dma_attr_t hxge_rx_dma_attr = {
343 	DMA_ATTR_V0,		/* version number. */
344 	0,			/* low address */
345 	0xffffffffffffffff,	/* high address */
346 	0xffffffffffffffff,	/* address counter max */
347 	0x10000,		/* alignment */
348 	0xfc00fc,		/* dlim_burstsizes */
349 	0x1,			/* minimum transfer size */
350 	0xffffffffffffffff,	/* maximum transfer size */
351 	0xffffffffffffffff,	/* maximum segment size */
352 	1,			/* scatter/gather list length */
353 	(unsigned int)1,	/* granularity */
354 	DDI_DMA_RELAXED_ORDERING /* attribute flags */
355 };
356 
357 ddi_dma_lim_t hxge_dma_limits = {
358 	(uint_t)0,		/* dlim_addr_lo */
359 	(uint_t)0xffffffff,	/* dlim_addr_hi */
360 	(uint_t)0xffffffff,	/* dlim_cntr_max */
361 	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
362 	0x1,			/* dlim_minxfer */
363 	1024			/* dlim_speed */
364 };
365 
366 dma_method_t hxge_force_dma = DVMA;
367 
368 /*
369  * dma chunk sizes.
370  *
371  * Try to allocate the largest possible size
372  * so that fewer number of dma chunks would be managed
373  */
374 size_t alloc_sizes[] = {
375     0x1000, 0x2000, 0x4000, 0x8000,
376     0x10000, 0x20000, 0x40000, 0x80000,
377     0x100000, 0x200000, 0x400000, 0x800000, 0x1000000
378 };
379 
380 /*
381  * Translate "dev_t" to a pointer to the associated "dev_info_t".
382  */
383 static int
384 hxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
385 {
386 	p_hxge_t	hxgep = NULL;
387 	int		instance;
388 	int		status = DDI_SUCCESS;
389 	int		i;
390 
391 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_attach"));
392 
393 	/*
394 	 * Get the device instance since we'll need to set up or retrieve a soft
395 	 * state for this instance.
396 	 */
397 	instance = ddi_get_instance(dip);
398 
399 	switch (cmd) {
400 	case DDI_ATTACH:
401 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_ATTACH"));
402 		break;
403 
404 	case DDI_RESUME:
405 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_RESUME"));
406 		hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
407 		if (hxgep == NULL) {
408 			status = DDI_FAILURE;
409 			break;
410 		}
411 		if (hxgep->dip != dip) {
412 			status = DDI_FAILURE;
413 			break;
414 		}
415 		if (hxgep->suspended == DDI_PM_SUSPEND) {
416 			status = ddi_dev_is_needed(hxgep->dip, 0, 1);
417 		} else {
418 			(void) hxge_resume(hxgep);
419 		}
420 		goto hxge_attach_exit;
421 
422 	case DDI_PM_RESUME:
423 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_RESUME"));
424 		hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
425 		if (hxgep == NULL) {
426 			status = DDI_FAILURE;
427 			break;
428 		}
429 		if (hxgep->dip != dip) {
430 			status = DDI_FAILURE;
431 			break;
432 		}
433 		(void) hxge_resume(hxgep);
434 		goto hxge_attach_exit;
435 
436 	default:
437 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing unknown"));
438 		status = DDI_FAILURE;
439 		goto hxge_attach_exit;
440 	}
441 
442 	if (ddi_soft_state_zalloc(hxge_list, instance) == DDI_FAILURE) {
443 		status = DDI_FAILURE;
444 		HXGE_ERROR_MSG((hxgep, DDI_CTL,
445 		    "ddi_soft_state_zalloc failed"));
446 		goto hxge_attach_exit;
447 	}
448 
449 	hxgep = ddi_get_soft_state(hxge_list, instance);
450 	if (hxgep == NULL) {
451 		status = HXGE_ERROR;
452 		HXGE_ERROR_MSG((hxgep, DDI_CTL,
453 		    "ddi_get_soft_state failed"));
454 		goto hxge_attach_fail2;
455 	}
456 
457 	hxgep->drv_state = 0;
458 	hxgep->dip = dip;
459 	hxgep->instance = instance;
460 	hxgep->p_dip = ddi_get_parent(dip);
461 	hxgep->hxge_debug_level = hxge_debug_level;
462 	hpi_debug_level = hxge_debug_level;
463 
464 	/*
465 	 * Initialize the MMAC structure.
466 	 */
467 	(void) hxge_pfc_num_macs_get(hxgep, &hxgep->mmac.total);
468 	hxgep->mmac.available = hxgep->mmac.total;
469 	for (i = 0; i < hxgep->mmac.total; i++) {
470 		hxgep->mmac.addrs[i].set = B_FALSE;
471 		hxgep->mmac.addrs[i].primary = B_FALSE;
472 	}
473 
474 	hxge_fm_init(hxgep, &hxge_dev_reg_acc_attr, &hxge_dev_desc_dma_acc_attr,
475 	    &hxge_rx_dma_attr);
476 
477 	status = hxge_map_regs(hxgep);
478 	if (status != HXGE_OK) {
479 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_map_regs failed"));
480 		goto hxge_attach_fail3;
481 	}
482 
483 	status = hxge_init_common_dev(hxgep);
484 	if (status != HXGE_OK) {
485 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
486 		    "hxge_init_common_dev failed"));
487 		goto hxge_attach_fail4;
488 	}
489 
490 	/*
491 	 * Set up the ndd parameters for this instance.
492 	 */
493 	hxge_init_param(hxgep);
494 
495 	/*
496 	 * Setup Register Tracing Buffer.
497 	 * Set up the register tracing buffer.
498 	hpi_rtrace_buf_init((rtrace_t *)&hpi_rtracebuf);
499 
500 	/* init stats ptr */
501 	hxge_init_statsp(hxgep);
502 
503 	status = hxge_setup_mutexes(hxgep);
504 	if (status != HXGE_OK) {
505 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set mutex failed"));
506 		goto hxge_attach_fail;
507 	}
508 
509 	/* Scrub the MSI-X memory */
510 	hxge_msix_init(hxgep);
511 
512 	status = hxge_get_config_properties(hxgep);
513 	if (status != HXGE_OK) {
514 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "get_hw create failed"));
515 		goto hxge_attach_fail;
516 	}
517 
518 	/*
519 	 * Set up the kstats for the driver.
520 	 */
521 	hxge_setup_kstats(hxgep);
522 	hxge_setup_param(hxgep);
523 
524 	status = hxge_setup_system_dma_pages(hxgep);
525 	if (status != HXGE_OK) {
526 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "set dma page failed"));
527 		goto hxge_attach_fail;
528 	}
529 
530 	hxge_hw_id_init(hxgep);
531 	hxge_hw_init_niu_common(hxgep);
532 
533 	status = hxge_setup_dev(hxgep);
534 	if (status != DDI_SUCCESS) {
535 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set dev failed"));
536 		goto hxge_attach_fail;
537 	}
538 
539 	status = hxge_add_intrs(hxgep);
540 	if (status != DDI_SUCCESS) {
541 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "add_intr failed"));
542 		goto hxge_attach_fail;
543 	}
544 
545 	/*
546 	 * Enable interrupts.
547 	 */
548 	hxge_intrs_enable(hxgep);
549 
550 	if ((status = hxge_mac_register(hxgep)) != HXGE_OK) {
551 		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
552 		    "unable to register to mac layer (%d)", status));
553 		goto hxge_attach_fail;
554 	}
555 	mac_link_update(hxgep->mach, LINK_STATE_UNKNOWN);
556 
557 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "registered to mac (instance %d)",
558 	    instance));
559 
560 	goto hxge_attach_exit;
561 
562 hxge_attach_fail:
563 	hxge_unattach(hxgep);
564 	goto hxge_attach_fail1;
565 
566 hxge_attach_fail5:
567 	/*
568 	 * Tear down the ndd parameters setup.
569 	 */
570 	hxge_destroy_param(hxgep);
571 
572 	/*
573 	 * Tear down the kstat setup.
574 	 */
575 	hxge_destroy_kstats(hxgep);
576 
577 hxge_attach_fail4:
578 	if (hxgep->hxge_hw_p) {
579 		hxge_uninit_common_dev(hxgep);
580 		hxgep->hxge_hw_p = NULL;
581 	}
582 hxge_attach_fail3:
583 	/*
584 	 * Unmap the registers.
585 	 */
586 	hxge_unmap_regs(hxgep);
587 
588 	hxge_fm_fini(hxgep);
589 
590 hxge_attach_fail2:
591 	ddi_soft_state_free(hxge_list, instance);
592 
593 hxge_attach_fail1:
594 	if (status != HXGE_OK)
595 		status = (HXGE_ERROR | HXGE_DDI_FAILED);
596 	hxgep = NULL;
597 
598 hxge_attach_exit:
599 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_attach status = 0x%08x",
600 	    status));
601 
602 	return (status);
603 }
604 
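/*
 * hxge_detach: detach(9E) entry point.  DDI_DETACH unregisters the
 * instance from the MAC layer and tears it down via hxge_unattach();
 * DDI_SUSPEND and DDI_PM_SUSPEND quiesce the hardware via hxge_suspend().
 */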
605 static int
606 hxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
607 {
608 	int		status = DDI_SUCCESS;
609 	int		instance;
610 	p_hxge_t	hxgep = NULL;
611 
612 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_detach"));
613 	instance = ddi_get_instance(dip);
614 	hxgep = ddi_get_soft_state(hxge_list, instance);
615 	if (hxgep == NULL) {
616 		status = DDI_FAILURE;
617 		goto hxge_detach_exit;
618 	}
619 
620 	switch (cmd) {
621 	case DDI_DETACH:
622 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_DETACH"));
623 		break;
624 
625 	case DDI_PM_SUSPEND:
626 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
627 		hxgep->suspended = DDI_PM_SUSPEND;
628 		hxge_suspend(hxgep);
629 		break;
630 
631 	case DDI_SUSPEND:
632 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_SUSPEND"));
633 		if (hxgep->suspended != DDI_PM_SUSPEND) {
634 			hxgep->suspended = DDI_SUSPEND;
635 			hxge_suspend(hxgep);
636 		}
637 		break;
638 
639 	default:
640 		status = DDI_FAILURE;
641 		break;
642 	}
643 
644 	if (cmd != DDI_DETACH)
645 		goto hxge_detach_exit;
646 
647 	/*
648 	 * Stop the xcvr polling.
649 	 */
650 	hxgep->suspended = cmd;
651 
652 	if (hxgep->mach && (status = mac_unregister(hxgep->mach)) != 0) {
653 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
654 		    "<== hxge_detach status = 0x%08X", status));
655 		return (DDI_FAILURE);
656 	}
657 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
658 	    "<== hxge_detach (mac_unregister) status = 0x%08X", status));
659 
660 	hxge_unattach(hxgep);
661 	hxgep = NULL;
662 
663 hxge_detach_exit:
664 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_detach status = 0x%08X",
665 	    status));
666 
667 	return (status);
668 }
669 
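/*
 * hxge_unattach: release everything acquired during attach: the common
 * device state, timers, interrupts, ndd parameters, kstats, mapped
 * registers, FMA state, mutexes and the soft state structure.  The
 * hardware blocks (RDC, TDC, PFC, VMAC) are reset before the registers
 * are unmapped.
 */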
670 static void
671 hxge_unattach(p_hxge_t hxgep)
672 {
673 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unattach"));
674 
675 	if (hxgep == NULL || hxgep->dev_regs == NULL) {
676 		return;
677 	}
678 
679 	if (hxgep->hxge_hw_p) {
680 		hxge_uninit_common_dev(hxgep);
681 		hxgep->hxge_hw_p = NULL;
682 	}
683 
684 	if (hxgep->hxge_timerid) {
685 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
686 		hxgep->hxge_timerid = 0;
687 	}
688 
689 	/* Stop interrupts. */
690 	hxge_intrs_disable(hxgep);
691 
692 	/* Stop any further interrupts. */
693 	hxge_remove_intrs(hxgep);
694 
695 	/* Stop the device and free resources. */
696 	hxge_destroy_dev(hxgep);
697 
698 	/* Tear down the ndd parameters setup. */
699 	hxge_destroy_param(hxgep);
700 
701 	/* Tear down the kstat setup. */
702 	hxge_destroy_kstats(hxgep);
703 
704 	/*
705 	 * Remove the list of ndd parameters which were set up during attach.
706 	 */
707 	if (hxgep->dip) {
708 		HXGE_DEBUG_MSG((hxgep, OBP_CTL,
709 		    " hxge_unattach: remove all properties"));
710 		(void) ddi_prop_remove_all(hxgep->dip);
711 	}
712 
713 	/*
714 	 * Reset RDC, TDC, PFC, and VMAC blocks from PEU to clear any
715 	 * previous state before unmapping the registers.
716 	 */
717 	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, 0x0000001E);
718 	HXGE_DELAY(1000);
719 
720 	/*
721 	 * Unmap the registers.
722 	 */
723 	hxge_unmap_regs(hxgep);
724 
725 	hxge_fm_fini(hxgep);
726 
727 	/* Destroy all mutexes.  */
728 	hxge_destroy_mutexes(hxgep);
729 
730 	/*
731 	 * Free the soft state data structures allocated with this instance.
732 	 */
733 	ddi_soft_state_free(hxge_list, hxgep->instance);
734 
735 	HXGE_DEBUG_MSG((NULL, DDI_CTL, "<== hxge_unattach"));
736 }
737 
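/*
 * hxge_map_regs: map the device register sets and save the handles in
 * dev_regs: set 0 is the PCI config space, set 1 the Hydra PIO
 * registers and set 2 the MSI/MSI-X registers.
 */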
738 static hxge_status_t
739 hxge_map_regs(p_hxge_t hxgep)
740 {
741 	int		ddi_status = DDI_SUCCESS;
742 	p_dev_regs_t	dev_regs;
743 
744 #ifdef	HXGE_DEBUG
745 	char		*sysname;
746 #endif
747 
748 	off_t		regsize;
749 	hxge_status_t	status = HXGE_OK;
750 	int		nregs;
751 
752 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_map_regs"));
753 
754 	if (ddi_dev_nregs(hxgep->dip, &nregs) != DDI_SUCCESS)
755 		return (HXGE_ERROR);
756 
757 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_regs: nregs: %d", nregs));
758 
759 	hxgep->dev_regs = NULL;
760 	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
761 	dev_regs->hxge_regh = NULL;
762 	dev_regs->hxge_pciregh = NULL;
763 	dev_regs->hxge_msix_regh = NULL;
764 
765 	(void) ddi_dev_regsize(hxgep->dip, 0, &regsize);
766 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
767 	    "hxge_map_regs: pci config size 0x%x", regsize));
768 
769 	ddi_status = ddi_regs_map_setup(hxgep->dip, 0,
770 	    (caddr_t *)&(dev_regs->hxge_pciregp), 0, 0,
771 	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_pciregh);
772 	if (ddi_status != DDI_SUCCESS) {
773 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
774 		    "ddi_map_regs, hxge bus config regs failed"));
775 		goto hxge_map_regs_fail0;
776 	}
777 
778 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
779 	    "hxge_map_reg: PCI config addr 0x%0llx handle 0x%0llx",
780 	    dev_regs->hxge_pciregp,
781 	    dev_regs->hxge_pciregh));
782 
783 	(void) ddi_dev_regsize(hxgep->dip, 1, &regsize);
784 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
785 	    "hxge_map_regs: pio size 0x%x", regsize));
786 
787 	/* set up the device mapped register */
788 	ddi_status = ddi_regs_map_setup(hxgep->dip, 1,
789 	    (caddr_t *)&(dev_regs->hxge_regp), 0, 0,
790 	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_regh);
791 
792 	if (ddi_status != DDI_SUCCESS) {
793 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
794 		    "ddi_map_regs for Hydra global reg failed"));
795 		goto hxge_map_regs_fail1;
796 	}
797 
798 	/* set up the msi/msi-x mapped register */
799 	(void) ddi_dev_regsize(hxgep->dip, 2, &regsize);
800 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
801 	    "hxge_map_regs: msix size 0x%x", regsize));
802 
803 	ddi_status = ddi_regs_map_setup(hxgep->dip, 2,
804 	    (caddr_t *)&(dev_regs->hxge_msix_regp), 0, 0,
805 	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_msix_regh);
806 
807 	if (ddi_status != DDI_SUCCESS) {
808 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
809 		    "ddi_map_regs for msi reg failed"));
810 		goto hxge_map_regs_fail2;
811 	}
812 
813 	hxgep->dev_regs = dev_regs;
814 
815 	HPI_PCI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_pciregh);
816 	HPI_PCI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_pciregp);
817 	HPI_MSI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_msix_regh);
818 	HPI_MSI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_msix_regp);
819 
820 	HPI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
821 	HPI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
822 
823 	HPI_REG_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
824 	HPI_REG_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
825 
826 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_reg: hardware addr 0x%0llx "
827 	    " handle 0x%0llx", dev_regs->hxge_regp, dev_regs->hxge_regh));
828 
829 	goto hxge_map_regs_exit;
830 
831 hxge_map_regs_fail3:
832 	if (dev_regs->hxge_msix_regh) {
833 		ddi_regs_map_free(&dev_regs->hxge_msix_regh);
834 	}
835 
836 hxge_map_regs_fail2:
837 	if (dev_regs->hxge_regh) {
838 		ddi_regs_map_free(&dev_regs->hxge_regh);
839 	}
840 
841 hxge_map_regs_fail1:
842 	if (dev_regs->hxge_pciregh) {
843 		ddi_regs_map_free(&dev_regs->hxge_pciregh);
844 	}
845 
846 hxge_map_regs_fail0:
847 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "Freeing register set memory"));
848 	kmem_free(dev_regs, sizeof (dev_regs_t));
849 
850 hxge_map_regs_exit:
851 	if (ddi_status != DDI_SUCCESS)
852 		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
853 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_map_regs"));
854 	return (status);
855 }
856 
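/*
 * hxge_unmap_regs: release the register mappings created by
 * hxge_map_regs() and free the dev_regs structure.
 */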
857 static void
858 hxge_unmap_regs(p_hxge_t hxgep)
859 {
860 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unmap_regs"));
861 	if (hxgep->dev_regs) {
862 		if (hxgep->dev_regs->hxge_pciregh) {
863 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
864 			    "==> hxge_unmap_regs: bus"));
865 			ddi_regs_map_free(&hxgep->dev_regs->hxge_pciregh);
866 			hxgep->dev_regs->hxge_pciregh = NULL;
867 		}
868 
869 		if (hxgep->dev_regs->hxge_regh) {
870 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
871 			    "==> hxge_unmap_regs: device registers"));
872 			ddi_regs_map_free(&hxgep->dev_regs->hxge_regh);
873 			hxgep->dev_regs->hxge_regh = NULL;
874 		}
875 
876 		if (hxgep->dev_regs->hxge_msix_regh) {
877 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
878 			    "==> hxge_unmap_regs: device interrupts"));
879 			ddi_regs_map_free(&hxgep->dev_regs->hxge_msix_regh);
880 			hxgep->dev_regs->hxge_msix_regh = NULL;
881 		}
882 		kmem_free(hxgep->dev_regs, sizeof (dev_regs_t));
883 		hxgep->dev_regs = NULL;
884 	}
885 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_unmap_regs"));
886 }
887 
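/*
 * hxge_setup_mutexes: fetch the interrupt block cookie and initialize
 * the per-instance mutexes and the filter rwlock with it.
 */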
888 static hxge_status_t
889 hxge_setup_mutexes(p_hxge_t hxgep)
890 {
891 	int		ddi_status = DDI_SUCCESS;
892 	hxge_status_t	status = HXGE_OK;
893 
894 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_mutexes"));
895 
896 	/*
897 	 * Get the interrupt cookie so the mutexes can be initialized.
898 	 */
899 	ddi_status = ddi_get_iblock_cookie(hxgep->dip, 0,
900 	    &hxgep->interrupt_cookie);
901 
902 	if (ddi_status != DDI_SUCCESS) {
903 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
904 		    "<== hxge_setup_mutexes: failed 0x%x", ddi_status));
905 		goto hxge_setup_mutexes_exit;
906 	}
907 
908 	/*
909 	 * Initialize mutexes for this device.
910 	 */
911 	MUTEX_INIT(hxgep->genlock, NULL,
912 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
913 	MUTEX_INIT(&hxgep->vmac_lock, NULL,
914 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
915 	MUTEX_INIT(&hxgep->ouraddr_lock, NULL,
916 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
917 	RW_INIT(&hxgep->filter_lock, NULL,
918 	    RW_DRIVER, (void *) hxgep->interrupt_cookie);
919 	MUTEX_INIT(&hxgep->pio_lock, NULL,
920 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
921 	MUTEX_INIT(&hxgep->timeout.lock, NULL,
922 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
923 
924 hxge_setup_mutexes_exit:
925 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
926 	    "<== hxge_setup_mutexes status = %x", status));
927 
928 	if (ddi_status != DDI_SUCCESS)
929 		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
930 
931 	return (status);
932 }
933 
934 static void
935 hxge_destroy_mutexes(p_hxge_t hxgep)
936 {
937 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_mutexes"));
938 	RW_DESTROY(&hxgep->filter_lock);
939 	MUTEX_DESTROY(&hxgep->vmac_lock);
940 	MUTEX_DESTROY(&hxgep->ouraddr_lock);
941 	MUTEX_DESTROY(hxgep->genlock);
942 	MUTEX_DESTROY(&hxgep->pio_lock);
943 	MUTEX_DESTROY(&hxgep->timeout.lock);
944 
945 	if (hxge_debug_init == 1) {
946 		MUTEX_DESTROY(&hxgedebuglock);
947 		hxge_debug_init = 0;
948 	}
949 
950 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_mutexes"));
951 }
952 
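/*
 * hxge_init: bring the hardware to an operational state: allocate the
 * RX/TX DMA memory pools, initialize the TX and RX DMA channels, the
 * TCAM classifier and the VMAC, then enable hardware interrupts.
 * Returns immediately if the hardware is already initialized.
 */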
953 hxge_status_t
954 hxge_init(p_hxge_t hxgep)
955 {
956 	hxge_status_t status = HXGE_OK;
957 
958 	HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_init"));
959 
960 	if (hxgep->drv_state & STATE_HW_INITIALIZED) {
961 		return (status);
962 	}
963 
964 	/*
965 	 * Allocate system memory for the receive/transmit buffer blocks and
966 	 * receive/transmit descriptor rings.
967 	 */
968 	status = hxge_alloc_mem_pool(hxgep);
969 	if (status != HXGE_OK) {
970 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "alloc mem failed\n"));
971 		goto hxge_init_fail1;
972 	}
973 
974 	/*
975 	 * Initialize and enable TXDMA channels.
976 	 */
977 	status = hxge_init_txdma_channels(hxgep);
978 	if (status != HXGE_OK) {
979 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init txdma failed\n"));
980 		goto hxge_init_fail3;
981 	}
982 
983 	/*
984 	 * Initialize and enable RXDMA channels.
985 	 */
986 	status = hxge_init_rxdma_channels(hxgep);
987 	if (status != HXGE_OK) {
988 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init rxdma failed\n"));
989 		goto hxge_init_fail4;
990 	}
991 
992 	/*
993 	 * Initialize TCAM
994 	 */
995 	status = hxge_classify_init(hxgep);
996 	if (status != HXGE_OK) {
997 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init classify failed\n"));
998 		goto hxge_init_fail5;
999 	}
1000 
1001 	/*
1002 	 * Initialize the VMAC block.
1003 	 */
1004 	status = hxge_vmac_init(hxgep);
1005 	if (status != HXGE_OK) {
1006 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init MAC failed\n"));
1007 		goto hxge_init_fail5;
1008 	}
1009 
1010 	/* Bringup - this may be unnecessary when PXE and FCode are available */
1011 	status = hxge_pfc_set_default_mac_addr(hxgep);
1012 	if (status != HXGE_OK) {
1013 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1014 		    "Default Address Failure\n"));
1015 		goto hxge_init_fail5;
1016 	}
1017 
1018 	/*
1019 	 * Enable hardware interrupts.
1020 	 */
1021 	hxge_intr_hw_enable(hxgep);
1022 	hxgep->drv_state |= STATE_HW_INITIALIZED;
1023 
1024 	goto hxge_init_exit;
1025 
1026 hxge_init_fail5:
1027 	hxge_uninit_rxdma_channels(hxgep);
1028 hxge_init_fail4:
1029 	hxge_uninit_txdma_channels(hxgep);
1030 hxge_init_fail3:
1031 	hxge_free_mem_pool(hxgep);
1032 hxge_init_fail1:
1033 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1034 	    "<== hxge_init status (failed) = 0x%08x", status));
1035 	return (status);
1036 
1037 hxge_init_exit:
1038 
1039 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_init status = 0x%08x",
1040 	    status));
1041 
1042 	return (status);
1043 }
1044 
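/*
 * hxge_start_timer: arm a driver timeout (msec is in milliseconds)
 * unless the instance is suspended; returns the timeout id, or NULL
 * when no timeout was scheduled.
 */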
1045 timeout_id_t
1046 hxge_start_timer(p_hxge_t hxgep, fptrv_t func, int msec)
1047 {
1048 	if ((hxgep->suspended == 0) || (hxgep->suspended == DDI_RESUME)) {
1049 		return (timeout(func, (caddr_t)hxgep,
1050 		    drv_usectohz(1000 * msec)));
1051 	}
1052 	return (NULL);
1053 }
1054 
1055 /*ARGSUSED*/
1056 void
1057 hxge_stop_timer(p_hxge_t hxgep, timeout_id_t timerid)
1058 {
1059 	if (timerid) {
1060 		(void) untimeout(timerid);
1061 	}
1062 }
1063 
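/*
 * hxge_uninit: undo hxge_init(): stop the driver timer, disable
 * hardware interrupts and the VMAC, quiesce and tear down the TX/RX
 * DMA channels, free the classification resources and the DMA pools.
 */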
1064 void
1065 hxge_uninit(p_hxge_t hxgep)
1066 {
1067 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_uninit"));
1068 
1069 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
1070 		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1071 		    "==> hxge_uninit: not initialized"));
1072 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
1073 		return;
1074 	}
1075 
1076 	/* Stop timer */
1077 	if (hxgep->hxge_timerid) {
1078 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
1079 		hxgep->hxge_timerid = 0;
1080 	}
1081 
1082 	(void) hxge_intr_hw_disable(hxgep);
1083 
1084 	/* Reset the receive VMAC side.  */
1085 	(void) hxge_rx_vmac_disable(hxgep);
1086 
1087 	/* Free classification resources */
1088 	(void) hxge_classify_uninit(hxgep);
1089 
1090 	/* Reset the transmit/receive DMA side.  */
1091 	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
1092 	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);
1093 
1094 	hxge_uninit_txdma_channels(hxgep);
1095 	hxge_uninit_rxdma_channels(hxgep);
1096 
1097 	/* Reset the transmit VMAC side.  */
1098 	(void) hxge_tx_vmac_disable(hxgep);
1099 
1100 	hxge_free_mem_pool(hxgep);
1101 
1102 	hxgep->drv_state &= ~STATE_HW_INITIALIZED;
1103 
1104 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
1105 }
1106 
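/*
 * hxge_debug_msg: conditionally log a driver message.  The message is
 * emitted when the level matches the instance (or global) debug level,
 * or unconditionally for the HXGE_NOTE and HXGE_ERR_CTL levels.
 */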
1107 /*ARGSUSED*/
1108 /*VARARGS*/
1109 void
1110 hxge_debug_msg(p_hxge_t hxgep, uint64_t level, char *fmt, ...)
1111 {
1112 	char		msg_buffer[1048];
1113 	char		prefix_buffer[32];
1114 	int		instance;
1115 	uint64_t	debug_level;
1116 	int		cmn_level = CE_CONT;
1117 	va_list		ap;
1118 
1119 	debug_level = (hxgep == NULL) ? hxge_debug_level :
1120 	    hxgep->hxge_debug_level;
1121 
1122 	if ((level & debug_level) || (level == HXGE_NOTE) ||
1123 	    (level == HXGE_ERR_CTL)) {
1124 		/* do the msg processing */
1125 		if (hxge_debug_init == 0) {
1126 			MUTEX_INIT(&hxgedebuglock, NULL, MUTEX_DRIVER, NULL);
1127 			hxge_debug_init = 1;
1128 		}
1129 
1130 		MUTEX_ENTER(&hxgedebuglock);
1131 
1132 		if ((level & HXGE_NOTE)) {
1133 			cmn_level = CE_NOTE;
1134 		}
1135 
1136 		if (level & HXGE_ERR_CTL) {
1137 			cmn_level = CE_WARN;
1138 		}
1139 
1140 		va_start(ap, fmt);
1141 		(void) vsprintf(msg_buffer, fmt, ap);
1142 		va_end(ap);
1143 
1144 		if (hxgep == NULL) {
1145 			instance = -1;
1146 			(void) sprintf(prefix_buffer, "%s :", "hxge");
1147 		} else {
1148 			instance = hxgep->instance;
1149 			(void) sprintf(prefix_buffer,
1150 			    "%s%d :", "hxge", instance);
1151 		}
1152 
1153 		MUTEX_EXIT(&hxgedebuglock);
1154 		cmn_err(cmn_level, "%s %s\n", prefix_buffer, msg_buffer);
1155 	}
1156 }
1157 
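/*
 * hxge_dump_packet: format up to MAX_DUMP_SZ bytes of a packet as a
 * colon-separated hex string in a static buffer; not reentrant.
 */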
1158 char *
1159 hxge_dump_packet(char *addr, int size)
1160 {
1161 	uchar_t		*ap = (uchar_t *)addr;
1162 	int		i;
1163 	static char	etherbuf[1024];
1164 	char		*cp = etherbuf;
1165 	char		digits[] = "0123456789abcdef";
1166 
1167 	if (!size)
1168 		size = 60;
1169 
1170 	if (size > MAX_DUMP_SZ) {
1171 		/* Dump the leading bytes */
1172 		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1173 			if (*ap > 0x0f)
1174 				*cp++ = digits[*ap >> 4];
1175 			*cp++ = digits[*ap++ & 0xf];
1176 			*cp++ = ':';
1177 		}
1178 		for (i = 0; i < 20; i++)
1179 			*cp++ = '.';
1180 		/* Dump the last MAX_DUMP_SZ/2 bytes */
1181 		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
1182 		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1183 			if (*ap > 0x0f)
1184 				*cp++ = digits[*ap >> 4];
1185 			*cp++ = digits[*ap++ & 0xf];
1186 			*cp++ = ':';
1187 		}
1188 	} else {
1189 		for (i = 0; i < size; i++) {
1190 			if (*ap > 0x0f)
1191 				*cp++ = digits[*ap >> 4];
1192 			*cp++ = digits[*ap++ & 0xf];
1193 			*cp++ = ':';
1194 		}
1195 	}
1196 	*--cp = 0;
1197 	return (etherbuf);
1198 }
1199 
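/*
 * hxge_suspend: quiesce the device for DDI_SUSPEND/DDI_PM_SUSPEND by
 * stopping the link status timer, disabling interrupts and stopping
 * the hardware.
 */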
1200 static void
1201 hxge_suspend(p_hxge_t hxgep)
1202 {
1203 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_suspend"));
1204 
1205 	/*
1206 	 * Stop the link status timer before hxge_intrs_disable() to avoid
1207 	 * accessing the MSIX table simultaneously. Note that the timer
1208 	 * routine polls for MSIX parity errors.
1209 	 */
1210 	MUTEX_ENTER(&hxgep->timeout.lock);
1211 	if (hxgep->timeout.id)
1212 		(void) untimeout(hxgep->timeout.id);
1213 	MUTEX_EXIT(&hxgep->timeout.lock);
1214 
1215 	hxge_intrs_disable(hxgep);
1216 	hxge_destroy_dev(hxgep);
1217 
1218 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_suspend"));
1219 }
1220 
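/*
 * hxge_resume: restart the device for DDI_RESUME: re-enable the RX/TX
 * DMA channels and the VMAC, re-enable interrupts and restart the
 * link status timer.
 */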
1221 static hxge_status_t
1222 hxge_resume(p_hxge_t hxgep)
1223 {
1224 	hxge_status_t status = HXGE_OK;
1225 
1226 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_resume"));
1227 	hxgep->suspended = DDI_RESUME;
1228 
1229 	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_START);
1230 	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_START);
1231 
1232 	(void) hxge_rx_vmac_enable(hxgep);
1233 	(void) hxge_tx_vmac_enable(hxgep);
1234 
1235 	hxge_intrs_enable(hxgep);
1236 
1237 	hxgep->suspended = 0;
1238 
1239 	/*
1240 	 * Resume the link status timer after hxge_intrs_enable to avoid
1241 	 * accessing the MSIX table simultaneously.
1242 	 */
1243 	MUTEX_ENTER(&hxgep->timeout.lock);
1244 	hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
1245 	    hxgep->timeout.ticks);
1246 	MUTEX_EXIT(&hxgep->timeout.lock);
1247 
1248 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1249 	    "<== hxge_resume status = 0x%x", status));
1250 
1251 	return (status);
1252 }
1253 
1254 static hxge_status_t
1255 hxge_setup_dev(p_hxge_t hxgep)
1256 {
1257 	hxge_status_t status = HXGE_OK;
1258 
1259 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_dev"));
1260 
1261 	status = hxge_link_init(hxgep);
1262 	if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) {
1263 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1264 		    "Bad register acc handle"));
1265 		status = HXGE_ERROR;
1266 	}
1267 
1268 	if (status != HXGE_OK) {
1269 		HXGE_DEBUG_MSG((hxgep, MAC_CTL,
1270 		    " hxge_setup_dev status (link init 0x%08x)", status));
1271 		goto hxge_setup_dev_exit;
1272 	}
1273 
1274 hxge_setup_dev_exit:
1275 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1276 	    "<== hxge_setup_dev status = 0x%08x", status));
1277 
1278 	return (status);
1279 }
1280 
1281 static void
1282 hxge_destroy_dev(p_hxge_t hxgep)
1283 {
1284 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_dev"));
1285 
1286 	(void) hxge_hw_stop(hxgep);
1287 
1288 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_dev"));
1289 }
1290 
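/*
 * hxge_setup_system_dma_pages: determine the system/IOMMU page size
 * (Hydra supports at most 8K pages), derive the receive block size and
 * DMA alignment from it, and probe the system DMA burst sizes using a
 * temporary spare DMA handle.
 */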
1291 static hxge_status_t
1292 hxge_setup_system_dma_pages(p_hxge_t hxgep)
1293 {
1294 	int			ddi_status = DDI_SUCCESS;
1295 	uint_t			count;
1296 	ddi_dma_cookie_t	cookie;
1297 	uint_t			iommu_pagesize;
1298 	hxge_status_t		status = HXGE_OK;
1299 
1300 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_system_dma_pages"));
1301 
1302 	hxgep->sys_page_sz = ddi_ptob(hxgep->dip, (ulong_t)1);
1303 	iommu_pagesize = dvma_pagesize(hxgep->dip);
1304 
1305 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1306 	    " hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1307 	    " default_block_size %d iommu_pagesize %d",
1308 	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1309 	    hxgep->rx_default_block_size, iommu_pagesize));
1310 
1311 	if (iommu_pagesize != 0) {
1312 		if (hxgep->sys_page_sz == iommu_pagesize) {
1313 			/* Hydra supports up to 8K pages */
1314 			if (iommu_pagesize > 0x2000)
1315 				hxgep->sys_page_sz = 0x2000;
1316 		} else {
1317 			if (hxgep->sys_page_sz > iommu_pagesize)
1318 				hxgep->sys_page_sz = iommu_pagesize;
1319 		}
1320 	}
1321 
1322 	hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1323 
1324 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1325 	    "==> hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1326 	    "default_block_size %d page mask %d",
1327 	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1328 	    hxgep->rx_default_block_size, hxgep->sys_page_mask));
1329 
1330 	switch (hxgep->sys_page_sz) {
1331 	default:
1332 		hxgep->sys_page_sz = 0x1000;
1333 		hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1334 		hxgep->rx_default_block_size = 0x1000;
1335 		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1336 		break;
1337 	case 0x1000:
1338 		hxgep->rx_default_block_size = 0x1000;
1339 		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1340 		break;
1341 	case 0x2000:
1342 		hxgep->rx_default_block_size = 0x2000;
1343 		hxgep->rx_bksize_code = RBR_BKSIZE_8K;
1344 		break;
1345 	}
1346 
1347 	hxge_rx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1348 	hxge_tx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1349 
1350 	/*
1351 	 * Get the system DMA burst size.
1352 	 */
1353 	ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr,
1354 	    DDI_DMA_DONTWAIT, 0, &hxgep->dmasparehandle);
1355 	if (ddi_status != DDI_SUCCESS) {
1356 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1357 		    "ddi_dma_alloc_handle: failed status 0x%x", ddi_status));
1358 		goto hxge_get_soft_properties_exit;
1359 	}
1360 
1361 	ddi_status = ddi_dma_addr_bind_handle(hxgep->dmasparehandle, NULL,
1362 	    (caddr_t)hxgep->dmasparehandle, sizeof (hxgep->dmasparehandle),
1363 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
1364 	    &cookie, &count);
1365 	if (ddi_status != DDI_DMA_MAPPED) {
1366 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1367 		    "Binding spare handle to find system burstsize failed."));
1368 		ddi_status = DDI_FAILURE;
1369 		goto hxge_get_soft_properties_fail1;
1370 	}
1371 
1372 	hxgep->sys_burst_sz = ddi_dma_burstsizes(hxgep->dmasparehandle);
1373 	(void) ddi_dma_unbind_handle(hxgep->dmasparehandle);
1374 
1375 hxge_get_soft_properties_fail1:
1376 	ddi_dma_free_handle(&hxgep->dmasparehandle);
1377 
1378 hxge_get_soft_properties_exit:
1379 
1380 	if (ddi_status != DDI_SUCCESS)
1381 		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
1382 
1383 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1384 	    "<== hxge_setup_system_dma_pages status = 0x%08x", status));
1385 
1386 	return (status);
1387 }
1388 
1389 static hxge_status_t
1390 hxge_alloc_mem_pool(p_hxge_t hxgep)
1391 {
1392 	hxge_status_t status = HXGE_OK;
1393 
1394 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_alloc_mem_pool"));
1395 
1396 	status = hxge_alloc_rx_mem_pool(hxgep);
1397 	if (status != HXGE_OK) {
1398 		return (HXGE_ERROR);
1399 	}
1400 
1401 	status = hxge_alloc_tx_mem_pool(hxgep);
1402 	if (status != HXGE_OK) {
1403 		hxge_free_rx_mem_pool(hxgep);
1404 		return (HXGE_ERROR);
1405 	}
1406 
1407 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_alloc_mem_pool"));
1408 	return (HXGE_OK);
1409 }
1410 
1411 static void
1412 hxge_free_mem_pool(p_hxge_t hxgep)
1413 {
1414 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_mem_pool"));
1415 
1416 	hxge_free_rx_mem_pool(hxgep);
1417 	hxge_free_tx_mem_pool(hxgep);
1418 
1419 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_mem_pool"));
1420 }
1421 
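/*
 * hxge_alloc_rx_mem_pool: for every receive DMA channel, allocate the
 * receive buffer chunks plus the RBR, RCR and mailbox control areas,
 * and record them in the per-instance receive DMA pool structures.
 */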
1422 static hxge_status_t
1423 hxge_alloc_rx_mem_pool(p_hxge_t hxgep)
1424 {
1425 	int			i, j;
1426 	uint32_t		ndmas, st_rdc;
1427 	p_hxge_dma_pt_cfg_t	p_all_cfgp;
1428 	p_hxge_hw_pt_cfg_t	p_cfgp;
1429 	p_hxge_dma_pool_t	dma_poolp;
1430 	p_hxge_dma_common_t	*dma_buf_p;
1431 	p_hxge_dma_pool_t	dma_rbr_cntl_poolp;
1432 	p_hxge_dma_common_t	*dma_rbr_cntl_p;
1433 	p_hxge_dma_pool_t	dma_rcr_cntl_poolp;
1434 	p_hxge_dma_common_t	*dma_rcr_cntl_p;
1435 	p_hxge_dma_pool_t	dma_mbox_cntl_poolp;
1436 	p_hxge_dma_common_t	*dma_mbox_cntl_p;
1437 	size_t			rx_buf_alloc_size;
1438 	size_t			rx_rbr_cntl_alloc_size;
1439 	size_t			rx_rcr_cntl_alloc_size;
1440 	size_t			rx_mbox_cntl_alloc_size;
1441 	uint32_t		*num_chunks;	/* per dma */
1442 	hxge_status_t		status = HXGE_OK;
1443 
1444 	uint32_t		hxge_port_rbr_size;
1445 	uint32_t		hxge_port_rbr_spare_size;
1446 	uint32_t		hxge_port_rcr_size;
1447 
1448 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_mem_pool"));
1449 
1450 	p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1451 	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1452 	st_rdc = p_cfgp->start_rdc;
1453 	ndmas = p_cfgp->max_rdcs;
1454 
1455 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1456 	    " hxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));
1457 
1458 	/*
1459 	 * Allocate memory for each receive DMA channel.
1460 	 */
1461 	dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1462 	    KM_SLEEP);
1463 	dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1464 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1465 
1466 	dma_rbr_cntl_poolp = (p_hxge_dma_pool_t)
1467 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1468 	dma_rbr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1469 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1470 	dma_rcr_cntl_poolp = (p_hxge_dma_pool_t)
1471 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1472 	dma_rcr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1473 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1474 	dma_mbox_cntl_poolp = (p_hxge_dma_pool_t)
1475 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1476 	dma_mbox_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1477 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1478 
1479 	num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
1480 	    KM_SLEEP);
1481 
1482 	/*
1483 	 * Assume that each DMA channel will be configured with the default
1484 	 * block size. RBR block counts must be a multiple of the batch count (16).
1485 	 */
1486 	hxge_port_rbr_size = p_all_cfgp->rbr_size;
1487 	hxge_port_rcr_size = p_all_cfgp->rcr_size;
1488 
1489 	if (!hxge_port_rbr_size) {
1490 		hxge_port_rbr_size = HXGE_RBR_RBB_DEFAULT;
1491 	}
1492 
1493 	if (hxge_port_rbr_size % HXGE_RXDMA_POST_BATCH) {
1494 		hxge_port_rbr_size = (HXGE_RXDMA_POST_BATCH *
1495 		    (hxge_port_rbr_size / HXGE_RXDMA_POST_BATCH + 1));
1496 	}
1497 
1498 	p_all_cfgp->rbr_size = hxge_port_rbr_size;
1499 	hxge_port_rbr_spare_size = hxge_rbr_spare_size;
1500 
1501 	if (hxge_port_rbr_spare_size % HXGE_RXDMA_POST_BATCH) {
1502 		hxge_port_rbr_spare_size = (HXGE_RXDMA_POST_BATCH *
1503 		    (hxge_port_rbr_spare_size / HXGE_RXDMA_POST_BATCH + 1));
1504 	}
1505 
1506 	rx_buf_alloc_size = (hxgep->rx_default_block_size *
1507 	    (hxge_port_rbr_size + hxge_port_rbr_spare_size));
1508 
1509 	/*
1510 	 * Addresses of the receive block ring, the receive completion ring
1511 	 * and the mailbox must all be cache-aligned (64 bytes).
1512 	 */
1513 	rx_rbr_cntl_alloc_size = hxge_port_rbr_size + hxge_port_rbr_spare_size;
1514 	rx_rbr_cntl_alloc_size *= sizeof (rx_desc_t);
1515 	rx_rcr_cntl_alloc_size = sizeof (rcr_entry_t) * hxge_port_rcr_size;
1516 	rx_mbox_cntl_alloc_size = sizeof (rxdma_mailbox_t);
1517 
1518 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_alloc_rx_mem_pool: "
1519 	    "hxge_port_rbr_size = %d hxge_port_rbr_spare_size = %d "
1520 	    "hxge_port_rcr_size = %d rx_rbr_cntl_alloc_size = %d",
1521 	    hxge_port_rbr_size, hxge_port_rbr_spare_size,
1522 	    hxge_port_rcr_size, rx_rbr_cntl_alloc_size));
1523 
1524 	hxgep->hxge_port_rbr_size = hxge_port_rbr_size;
1525 	hxgep->hxge_port_rcr_size = hxge_port_rcr_size;
1526 
1527 	/*
1528 	 * Allocate memory for receive buffers and descriptor rings. Replace
1529 	 * allocation functions with interface functions provided by the
1530 	 * partition manager when it is available.
1531 	 */
1532 	/*
1533 	 * Allocate memory for the receive buffer blocks.
1534 	 */
1535 	for (i = 0; i < ndmas; i++) {
1536 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1537 		    " hxge_alloc_rx_mem_pool to alloc mem: "
1538 		    " dma %d dma_buf_p %llx &dma_buf_p %llx",
1539 		    i, dma_buf_p[i], &dma_buf_p[i]));
1540 
1541 		num_chunks[i] = 0;
1542 
1543 		status = hxge_alloc_rx_buf_dma(hxgep, st_rdc, &dma_buf_p[i],
1544 		    rx_buf_alloc_size, hxgep->rx_default_block_size,
1545 		    &num_chunks[i]);
1546 		if (status != HXGE_OK) {
1547 			break;
1548 		}
1549 
1550 		st_rdc++;
1551 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1552 		    " hxge_alloc_rx_mem_pool DONE  alloc mem: "
1553 		    "dma %d dma_buf_p %llx &dma_buf_p %llx", i,
1554 		    dma_buf_p[i], &dma_buf_p[i]));
1555 	}
1556 
1557 	if (i < ndmas) {
1558 		goto hxge_alloc_rx_mem_fail1;
1559 	}
1560 
1561 	/*
1562 	 * Allocate memory for descriptor rings and mailbox.
1563 	 */
1564 	st_rdc = p_cfgp->start_rdc;
1565 	for (j = 0; j < ndmas; j++) {
1566 		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1567 		    &dma_rbr_cntl_p[j], &hxge_rx_rbr_desc_dma_attr,
1568 		    rx_rbr_cntl_alloc_size)) != HXGE_OK) {
1569 			break;
1570 		}
1571 
1572 		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1573 		    &dma_rcr_cntl_p[j], &hxge_rx_rcr_desc_dma_attr,
1574 		    rx_rcr_cntl_alloc_size)) != HXGE_OK) {
1575 			break;
1576 		}
1577 
1578 		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1579 		    &dma_mbox_cntl_p[j], &hxge_rx_mbox_dma_attr,
1580 		    rx_mbox_cntl_alloc_size)) != HXGE_OK) {
1581 			break;
1582 		}
1583 		st_rdc++;
1584 	}
1585 
1586 	if (j < ndmas) {
1587 		goto hxge_alloc_rx_mem_fail2;
1588 	}
1589 
1590 	dma_poolp->ndmas = ndmas;
1591 	dma_poolp->num_chunks = num_chunks;
1592 	dma_poolp->buf_allocated = B_TRUE;
1593 	hxgep->rx_buf_pool_p = dma_poolp;
1594 	dma_poolp->dma_buf_pool_p = dma_buf_p;
1595 
1596 	dma_rbr_cntl_poolp->ndmas = ndmas;
1597 	dma_rbr_cntl_poolp->buf_allocated = B_TRUE;
1598 	hxgep->rx_rbr_cntl_pool_p = dma_rbr_cntl_poolp;
1599 	dma_rbr_cntl_poolp->dma_buf_pool_p = dma_rbr_cntl_p;
1600 
1601 	dma_rcr_cntl_poolp->ndmas = ndmas;
1602 	dma_rcr_cntl_poolp->buf_allocated = B_TRUE;
1603 	hxgep->rx_rcr_cntl_pool_p = dma_rcr_cntl_poolp;
1604 	dma_rcr_cntl_poolp->dma_buf_pool_p = dma_rcr_cntl_p;
1605 
1606 	dma_mbox_cntl_poolp->ndmas = ndmas;
1607 	dma_mbox_cntl_poolp->buf_allocated = B_TRUE;
1608 	hxgep->rx_mbox_cntl_pool_p = dma_mbox_cntl_poolp;
1609 	dma_mbox_cntl_poolp->dma_buf_pool_p = dma_mbox_cntl_p;
1610 
1611 	goto hxge_alloc_rx_mem_pool_exit;
1612 
1613 hxge_alloc_rx_mem_fail2:
1614 	/* Free control buffers */
1615 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1616 	    "==> hxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
1617 	for (; j >= 0; j--) {
1618 		hxge_free_rx_cntl_dma(hxgep,
1619 		    (p_hxge_dma_common_t)dma_rbr_cntl_p[j]);
1620 		hxge_free_rx_cntl_dma(hxgep,
1621 		    (p_hxge_dma_common_t)dma_rcr_cntl_p[j]);
1622 		hxge_free_rx_cntl_dma(hxgep,
1623 		    (p_hxge_dma_common_t)dma_mbox_cntl_p[j]);
1624 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1625 		    "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1626 	}
1627 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1628 	    "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1629 
1630 hxge_alloc_rx_mem_fail1:
1631 	/* Free data buffers */
1632 	i--;
1633 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1634 	    "==> hxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
1635 	for (; i >= 0; i--) {
1636 		hxge_free_rx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
1637 		    num_chunks[i]);
1638 	}
1639 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1640 	    "==> hxge_alloc_rx_mem_pool: data bufs freed (%d)", i));
1641 
1642 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1643 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1644 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1645 	KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
1646 	KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1647 	KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
1648 	KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1649 	KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
1650 	KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1651 
1652 hxge_alloc_rx_mem_pool_exit:
1653 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1654 	    "<== hxge_alloc_rx_mem_pool:status 0x%08x", status));
1655 
1656 	return (status);
1657 }
1658 
1659 static void
1660 hxge_free_rx_mem_pool(p_hxge_t hxgep)
1661 {
1662 	uint32_t		i, ndmas;
1663 	p_hxge_dma_pool_t	dma_poolp;
1664 	p_hxge_dma_common_t	*dma_buf_p;
1665 	p_hxge_dma_pool_t	dma_rbr_cntl_poolp;
1666 	p_hxge_dma_common_t	*dma_rbr_cntl_p;
1667 	p_hxge_dma_pool_t	dma_rcr_cntl_poolp;
1668 	p_hxge_dma_common_t	*dma_rcr_cntl_p;
1669 	p_hxge_dma_pool_t	dma_mbox_cntl_poolp;
1670 	p_hxge_dma_common_t	*dma_mbox_cntl_p;
1671 	uint32_t		*num_chunks;
1672 
1673 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_free_rx_mem_pool"));
1674 
1675 	dma_poolp = hxgep->rx_buf_pool_p;
1676 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
1677 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool "
1678 		    "(null rx buf pool or buf not allocated)"));
1679 		return;
1680 	}
1681 
1682 	dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p;
1683 	if (dma_rbr_cntl_poolp == NULL ||
1684 	    (!dma_rbr_cntl_poolp->buf_allocated)) {
1685 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1686 		    "<== hxge_free_rx_mem_pool "
1687 		    "(null rbr cntl buf pool or rbr cntl buf not allocated)"));
1688 		return;
1689 	}
1690 
1691 	dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p;
1692 	if (dma_rcr_cntl_poolp == NULL ||
1693 	    (!dma_rcr_cntl_poolp->buf_allocated)) {
1694 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1695 		    "<== hxge_free_rx_mem_pool "
1696 		    "(null rcr cntl buf pool or rcr cntl buf not allocated)"));
1697 		return;
1698 	}
1699 
1700 	dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p;
1701 	if (dma_mbox_cntl_poolp == NULL ||
1702 	    (!dma_mbox_cntl_poolp->buf_allocated)) {
1703 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1704 		    "<== hxge_free_rx_mem_pool "
1705 		    "(null mbox cntl buf pool or mbox cntl buf not allocated)"));
1706 		return;
1707 	}
1708 
1709 	dma_buf_p = dma_poolp->dma_buf_pool_p;
1710 	num_chunks = dma_poolp->num_chunks;
1711 
1712 	dma_rbr_cntl_p = dma_rbr_cntl_poolp->dma_buf_pool_p;
1713 	dma_rcr_cntl_p = dma_rcr_cntl_poolp->dma_buf_pool_p;
1714 	dma_mbox_cntl_p = dma_mbox_cntl_poolp->dma_buf_pool_p;
1715 	ndmas = dma_rbr_cntl_poolp->ndmas;
1716 
1717 	for (i = 0; i < ndmas; i++) {
1718 		hxge_free_rx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
1719 	}
1720 
1721 	for (i = 0; i < ndmas; i++) {
1722 		hxge_free_rx_cntl_dma(hxgep, dma_rbr_cntl_p[i]);
1723 		hxge_free_rx_cntl_dma(hxgep, dma_rcr_cntl_p[i]);
1724 		hxge_free_rx_cntl_dma(hxgep, dma_mbox_cntl_p[i]);
1725 	}
1726 
1727 	for (i = 0; i < ndmas; i++) {
1728 		KMEM_FREE(dma_buf_p[i],
1729 		    sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1730 		KMEM_FREE(dma_rbr_cntl_p[i], sizeof (hxge_dma_common_t));
1731 		KMEM_FREE(dma_rcr_cntl_p[i], sizeof (hxge_dma_common_t));
1732 		KMEM_FREE(dma_mbox_cntl_p[i], sizeof (hxge_dma_common_t));
1733 	}
1734 
1735 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1736 	KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1737 	KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
1738 	KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1739 	KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
1740 	KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1741 	KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
1742 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1743 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1744 
1745 	hxgep->rx_buf_pool_p = NULL;
1746 	hxgep->rx_rbr_cntl_pool_p = NULL;
1747 	hxgep->rx_rcr_cntl_pool_p = NULL;
1748 	hxgep->rx_mbox_cntl_pool_p = NULL;
1749 
1750 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool"));
1751 }
1752 
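/*
 * hxge_alloc_rx_buf_dma: allocate the receive buffer area for one DMA
 * channel as up to HXGE_DMA_BLOCK chunks, stepping down through the
 * alloc_sizes[] table whenever a larger allocation fails.
 */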
1753 static hxge_status_t
1754 hxge_alloc_rx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
1755     p_hxge_dma_common_t *dmap,
1756     size_t alloc_size, size_t block_size, uint32_t *num_chunks)
1757 {
1758 	p_hxge_dma_common_t	rx_dmap;
1759 	hxge_status_t		status = HXGE_OK;
1760 	size_t			total_alloc_size;
1761 	size_t			allocated = 0;
1762 	int			i, size_index, array_size;
1763 
1764 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_buf_dma"));
1765 
1766 	rx_dmap = (p_hxge_dma_common_t)
1767 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
1768 
1769 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1770 	    " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
1771 	    dma_channel, alloc_size, block_size, dmap));
1772 
1773 	total_alloc_size = alloc_size;
1774 
1775 	i = 0;
1776 	size_index = 0;
1777 	array_size = sizeof (alloc_sizes) / sizeof (size_t);
1778 	while ((size_index < array_size) &&
1779 	    (alloc_sizes[size_index] < alloc_size))
1780 		size_index++;
1781 	if (size_index >= array_size) {
1782 		size_index = array_size - 1;
1783 	}
1784 
1785 	while ((allocated < total_alloc_size) &&
1786 	    (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
1787 		rx_dmap[i].dma_chunk_index = i;
1788 		rx_dmap[i].block_size = block_size;
1789 		rx_dmap[i].alength = alloc_sizes[size_index];
1790 		rx_dmap[i].orig_alength = rx_dmap[i].alength;
1791 		rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
1792 		rx_dmap[i].dma_channel = dma_channel;
1793 		rx_dmap[i].contig_alloc_type = B_FALSE;
1794 
1795 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1796 		    "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
1797 		    "i %d nblocks %d alength %d",
1798 		    dma_channel, i, &rx_dmap[i], block_size,
1799 		    i, rx_dmap[i].nblocks, rx_dmap[i].alength));
1800 		status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1801 		    &hxge_rx_dma_attr, rx_dmap[i].alength,
1802 		    &hxge_dev_buf_dma_acc_attr,
1803 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1804 		    (p_hxge_dma_common_t)(&rx_dmap[i]));
1805 		if (status != HXGE_OK) {
1806 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1807 			    " hxge_alloc_rx_buf_dma: Alloc Failed: "
1808 			    " for size: %d", alloc_sizes[size_index]));
1809 			size_index--;
1810 		} else {
1811 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1812 			    " alloc_rx_buf_dma allocated rdc %d "
1813 			    "chunk %d size %x dvma %x bufp %llx ",
1814 			    dma_channel, i, rx_dmap[i].alength,
1815 			    rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
1816 			i++;
1817 			allocated += alloc_sizes[size_index];
1818 		}
1819 	}
1820 
1821 	if (allocated < total_alloc_size) {
1822 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1823 		    " hxge_alloc_rx_buf_dma failed due to"
1824 		    " allocated(%d) < required(%d)",
1825 		    allocated, total_alloc_size));
1826 		goto hxge_alloc_rx_mem_fail1;
1827 	}
1828 
1829 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1830 	    " alloc_rx_buf_dma rdc %d allocated %d chunks", dma_channel, i));
1831 
1832 	*num_chunks = i;
1833 	*dmap = rx_dmap;
1834 
1835 	goto hxge_alloc_rx_mem_exit;
1836 
1837 hxge_alloc_rx_mem_fail1:
1838 	KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1839 
1840 hxge_alloc_rx_mem_exit:
1841 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1842 	    "<== hxge_alloc_rx_buf_dma status 0x%08x", status));
1843 
1844 	return (status);
1845 }
1846 
1847 /*ARGSUSED*/
1848 static void
1849 hxge_free_rx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
1850     uint32_t num_chunks)
1851 {
1852 	int i;
1853 
1854 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1855 	    "==> hxge_free_rx_buf_dma: # of chunks %d", num_chunks));
1856 
1857 	for (i = 0; i < num_chunks; i++) {
1858 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1859 		    "==> hxge_free_rx_buf_dma: chunk %d dmap 0x%llx", i, dmap));
1860 		hxge_dma_mem_free(dmap++);
1861 	}
1862 
1863 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_buf_dma"));
1864 }
1865 
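/*
 * hxge_alloc_rx_cntl_dma() -- allocate one cache-consistent DMA area
 * for a receive control structure (RBR, RCR or mailbox), using the DMA
 * attributes supplied by the caller.
 */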
1866 /*ARGSUSED*/
1867 static hxge_status_t
1868 hxge_alloc_rx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
1869     p_hxge_dma_common_t *dmap, struct ddi_dma_attr *attr, size_t size)
1870 {
1871 	p_hxge_dma_common_t	rx_dmap;
1872 	hxge_status_t		status = HXGE_OK;
1873 
1874 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_cntl_dma"));
1875 
1876 	rx_dmap = (p_hxge_dma_common_t)
1877 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t), KM_SLEEP);
1878 
1879 	rx_dmap->contig_alloc_type = B_FALSE;
1880 
1881 	status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1882 	    attr, size, &hxge_dev_desc_dma_acc_attr,
1883 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, rx_dmap);
1884 	if (status != HXGE_OK) {
1885 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1886 		    " hxge_alloc_rx_cntl_dma: Alloc Failed: "
1887 		    " for size: %d", size));
1888 		goto hxge_alloc_rx_cntl_dma_fail1;
1889 	}
1890 
1891 	*dmap = rx_dmap;
1892 
1893 	goto hxge_alloc_rx_cntl_dma_exit;
1894 
1895 hxge_alloc_rx_cntl_dma_fail1:
1896 	KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t));
1897 
1898 hxge_alloc_rx_cntl_dma_exit:
1899 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1900 	    "<== hxge_alloc_rx_cntl_dma status 0x%08x", status));
1901 
1902 	return (status);
1903 }
1904 
1905 /*ARGSUSED*/
1906 static void
1907 hxge_free_rx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
1908 {
1909 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_rx_cntl_dma"));
1910 
1911 	hxge_dma_mem_free(dmap);
1912 
1913 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_rx_cntl_dma"));
1914 }
1915 
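/*
 * hxge_alloc_tx_mem_pool() -- allocate the transmit memory pools.
 *
 * For each configured TDC channel, allocate a data buffer pool sized
 * for bcopy transmits and a control pool holding the transmit
 * descriptor ring and mailbox.  On failure, everything allocated so far
 * is freed and the failing status is returned.
 */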
1916 static hxge_status_t
1917 hxge_alloc_tx_mem_pool(p_hxge_t hxgep)
1918 {
1919 	hxge_status_t		status = HXGE_OK;
1920 	int			i, j;
1921 	uint32_t		ndmas, st_tdc;
1922 	p_hxge_dma_pt_cfg_t	p_all_cfgp;
1923 	p_hxge_hw_pt_cfg_t	p_cfgp;
1924 	p_hxge_dma_pool_t	dma_poolp;
1925 	p_hxge_dma_common_t	*dma_buf_p;
1926 	p_hxge_dma_pool_t	dma_cntl_poolp;
1927 	p_hxge_dma_common_t	*dma_cntl_p;
1928 	size_t			tx_buf_alloc_size;
1929 	size_t			tx_cntl_alloc_size;
1930 	uint32_t		*num_chunks;	/* per dma */
1931 
1932 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool"));
1933 
1934 	p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1935 	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1936 	st_tdc = p_cfgp->start_tdc;
1937 	ndmas = p_cfgp->max_tdcs;
1938 
1939 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool: "
1940 	    "p_cfgp 0x%016llx start_tdc %d ndmas %d hxgep->max_tdcs %d",
1941 	    p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, hxgep->max_tdcs));
1942 	/*
1943 	 * Allocate memory for each transmit DMA channel.
1944 	 */
1945 	dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1946 	    KM_SLEEP);
1947 	dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1948 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1949 
1950 	dma_cntl_poolp = (p_hxge_dma_pool_t)
1951 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1952 	dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1953 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1954 
1955 	hxgep->hxge_port_tx_ring_size = hxge_tx_ring_size;
1956 
1957 	/*
1958 	 * Assume that each DMA channel will be configured with the default
1959 	 * transmit buffer size for copying transmit data. (Packets with a
1960 	 * payload larger than this limit are not copied.)
1961 	 */
1962 	tx_buf_alloc_size = (hxge_bcopy_thresh * hxge_tx_ring_size);
1963 
1964 	/*
1965 	 * Addresses of transmit descriptor ring and the mailbox must be all
1966 	 * cache-aligned (64 bytes).
1967 	 */
1968 	tx_cntl_alloc_size = hxge_tx_ring_size;
1969 	tx_cntl_alloc_size *= (sizeof (tx_desc_t));
1970 	tx_cntl_alloc_size += sizeof (txdma_mailbox_t);
1971 
1972 	num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
1973 	    KM_SLEEP);
1974 
1975 	/*
1976 	 * Allocate memory for transmit buffers and descriptor rings. Replace
1977 	 * allocation functions with interface functions provided by the
1978 	 * partition manager when it is available.
1979 	 *
1980 	 * Allocate memory for the transmit buffer pool.
1981 	 */
1982 	for (i = 0; i < ndmas; i++) {
1983 		num_chunks[i] = 0;
1984 		status = hxge_alloc_tx_buf_dma(hxgep, st_tdc, &dma_buf_p[i],
1985 		    tx_buf_alloc_size, hxge_bcopy_thresh, &num_chunks[i]);
1986 		if (status != HXGE_OK) {
1987 			break;
1988 		}
1989 		st_tdc++;
1990 	}
1991 
1992 	if (i < ndmas) {
1993 		goto hxge_alloc_tx_mem_pool_fail1;
1994 	}
1995 
1996 	st_tdc = p_cfgp->start_tdc;
1997 
1998 	/*
1999 	 * Allocate memory for descriptor rings and mailbox.
2000 	 */
2001 	for (j = 0; j < ndmas; j++) {
2002 		status = hxge_alloc_tx_cntl_dma(hxgep, st_tdc, &dma_cntl_p[j],
2003 		    tx_cntl_alloc_size);
2004 		if (status != HXGE_OK) {
2005 			break;
2006 		}
2007 		st_tdc++;
2008 	}
2009 
2010 	if (j < ndmas) {
2011 		goto hxge_alloc_tx_mem_pool_fail2;
2012 	}
2013 
2014 	dma_poolp->ndmas = ndmas;
2015 	dma_poolp->num_chunks = num_chunks;
2016 	dma_poolp->buf_allocated = B_TRUE;
2017 	dma_poolp->dma_buf_pool_p = dma_buf_p;
2018 	hxgep->tx_buf_pool_p = dma_poolp;
2019 
2020 	dma_cntl_poolp->ndmas = ndmas;
2021 	dma_cntl_poolp->buf_allocated = B_TRUE;
2022 	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2023 	hxgep->tx_cntl_pool_p = dma_cntl_poolp;
2024 
2025 	HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2026 	    "==> hxge_alloc_tx_mem_pool: start_tdc %d "
2027 	    "ndmas %d poolp->ndmas %d", st_tdc, ndmas, dma_poolp->ndmas));
2028 
2029 	goto hxge_alloc_tx_mem_pool_exit;
2030 
2031 hxge_alloc_tx_mem_pool_fail2:
2032 	/* Free control buffers */
2033 	j--;
2034 	for (; j >= 0; j--) {
2035 		hxge_free_tx_cntl_dma(hxgep,
2036 		    (p_hxge_dma_common_t)dma_cntl_p[j]);
2037 	}
2038 
2039 hxge_alloc_tx_mem_pool_fail1:
2040 	/* Free data buffers */
2041 	i--;
2042 	for (; i >= 0; i--) {
2043 		hxge_free_tx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
2044 		    num_chunks[i]);
2045 	}
2046 
2047 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2048 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2049 	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2050 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2051 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2052 
2053 hxge_alloc_tx_mem_pool_exit:
2054 	HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2055 	    "<== hxge_alloc_tx_mem_pool:status 0x%08x", status));
2056 
2057 	return (status);
2058 }
2059 
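/*
 * hxge_alloc_tx_buf_dma() -- allocate the transmit buffer pool for one
 * TDC channel, using the same chunked strategy as hxge_alloc_rx_buf_dma():
 * chunks are drawn from the alloc_sizes[] table, falling back to smaller
 * sizes on failure, until alloc_size bytes or HXGE_DMA_BLOCK chunks have
 * been allocated.
 */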
2060 static hxge_status_t
2061 hxge_alloc_tx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
2062     p_hxge_dma_common_t *dmap, size_t alloc_size,
2063     size_t block_size, uint32_t *num_chunks)
2064 {
2065 	p_hxge_dma_common_t	tx_dmap;
2066 	hxge_status_t		status = HXGE_OK;
2067 	size_t			total_alloc_size;
2068 	size_t			allocated = 0;
2069 	int			i, size_index, array_size;
2070 
2071 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_buf_dma"));
2072 
2073 	tx_dmap = (p_hxge_dma_common_t)
2074 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
2075 
2076 	total_alloc_size = alloc_size;
2077 	i = 0;
2078 	size_index = 0;
2079 	array_size = sizeof (alloc_sizes) / sizeof (size_t);
2080 	while ((size_index < array_size) &&
2081 	    (alloc_sizes[size_index] < alloc_size))
2082 		size_index++;
2083 	if (size_index >= array_size) {
2084 		size_index = array_size - 1;
2085 	}
2086 
2087 	while ((allocated < total_alloc_size) &&
2088 	    (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
2089 		tx_dmap[i].dma_chunk_index = i;
2090 		tx_dmap[i].block_size = block_size;
2091 		tx_dmap[i].alength = alloc_sizes[size_index];
2092 		tx_dmap[i].orig_alength = tx_dmap[i].alength;
2093 		tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2094 		tx_dmap[i].dma_channel = dma_channel;
2095 		tx_dmap[i].contig_alloc_type = B_FALSE;
2096 
2097 		status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2098 		    &hxge_tx_dma_attr, tx_dmap[i].alength,
2099 		    &hxge_dev_buf_dma_acc_attr,
2100 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
2101 		    (p_hxge_dma_common_t)(&tx_dmap[i]));
2102 		if (status != HXGE_OK) {
2103 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2104 			    " hxge_alloc_tx_buf_dma: Alloc Failed: "
2105 			    " for size: %d", alloc_sizes[size_index]));
2106 			size_index--;
2107 		} else {
2108 			i++;
2109 			allocated += alloc_sizes[size_index];
2110 		}
2111 	}
2112 
2113 	if (allocated < total_alloc_size) {
2114 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2115 		    " hxge_alloc_tx_buf_dma: failed due to"
2116 		    " allocated(%d) < required(%d)",
2117 		    allocated, total_alloc_size));
2118 		goto hxge_alloc_tx_mem_fail1;
2119 	}
2120 
2121 	*num_chunks = i;
2122 	*dmap = tx_dmap;
2123 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2124 	    "==> hxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
2125 	    *dmap, i));
2126 	goto hxge_alloc_tx_mem_exit;
2127 
2128 hxge_alloc_tx_mem_fail1:
2129 	KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2130 
2131 hxge_alloc_tx_mem_exit:
2132 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2133 	    "<== hxge_alloc_tx_buf_dma status 0x%08x", status));
2134 
2135 	return (status);
2136 }
2137 
2138 /*ARGSUSED*/
2139 static void
2140 hxge_free_tx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
2141     uint32_t num_chunks)
2142 {
2143 	int i;
2144 
2145 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_tx_buf_dma"));
2146 
2147 	for (i = 0; i < num_chunks; i++) {
2148 		hxge_dma_mem_free(dmap++);
2149 	}
2150 
2151 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_tx_buf_dma"));
2152 }
2153 
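/*
 * hxge_alloc_tx_cntl_dma() -- allocate one cache-consistent DMA area
 * large enough for a transmit descriptor ring and its mailbox.
 */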
2154 /*ARGSUSED*/
2155 static hxge_status_t
2156 hxge_alloc_tx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
2157     p_hxge_dma_common_t *dmap, size_t size)
2158 {
2159 	p_hxge_dma_common_t	tx_dmap;
2160 	hxge_status_t		status = HXGE_OK;
2161 
2162 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_cntl_dma"));
2163 
2164 	tx_dmap = (p_hxge_dma_common_t)KMEM_ZALLOC(sizeof (hxge_dma_common_t),
2165 	    KM_SLEEP);
2166 
2167 	tx_dmap->contig_alloc_type = B_FALSE;
2168 
2169 	status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2170 	    &hxge_tx_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr,
2171 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, tx_dmap);
2172 	if (status != HXGE_OK) {
2173 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2174 		    " hxge_alloc_tx_cntl_dma: Alloc Failed: "
2175 		    " for size: %d", size));
2176 		goto hxge_alloc_tx_cntl_dma_fail1;
2177 	}
2178 
2179 	*dmap = tx_dmap;
2180 
2181 	goto hxge_alloc_tx_cntl_dma_exit;
2182 
2183 hxge_alloc_tx_cntl_dma_fail1:
2184 	KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t));
2185 
2186 hxge_alloc_tx_cntl_dma_exit:
2187 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2188 	    "<== hxge_alloc_tx_cntl_dma status 0x%08x", status));
2189 
2190 	return (status);
2191 }
2192 
2193 /*ARGSUSED*/
2194 static void
2195 hxge_free_tx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
2196 {
2197 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_tx_cntl_dma"));
2198 
2199 	hxge_dma_mem_free(dmap);
2200 
2201 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_tx_cntl_dma"));
2202 }
2203 
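/*
 * hxge_free_tx_mem_pool() -- tear down the transmit buffer and control
 * pools built by hxge_alloc_tx_mem_pool() and clear the pool pointers
 * in the hxge instance.
 */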
2204 static void
2205 hxge_free_tx_mem_pool(p_hxge_t hxgep)
2206 {
2207 	uint32_t		i, ndmas;
2208 	p_hxge_dma_pool_t	dma_poolp;
2209 	p_hxge_dma_common_t	*dma_buf_p;
2210 	p_hxge_dma_pool_t	dma_cntl_poolp;
2211 	p_hxge_dma_common_t	*dma_cntl_p;
2212 	uint32_t		*num_chunks;
2213 
2214 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_free_tx_mem_pool"));
2215 
2216 	dma_poolp = hxgep->tx_buf_pool_p;
2217 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
2218 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2219 		    "<== hxge_free_tx_mem_pool "
2220 		    "(null tx buf pool or tx buf not allocated)"));
2221 		return;
2222 	}
2223 
2224 	dma_cntl_poolp = hxgep->tx_cntl_pool_p;
2225 	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
2226 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2227 		    "<== hxge_free_tx_mem_pool "
2228 		    "(null tx cntl buf pool or cntl buf not allocated)"));
2229 		return;
2230 	}
2231 
2232 	dma_buf_p = dma_poolp->dma_buf_pool_p;
2233 	num_chunks = dma_poolp->num_chunks;
2234 
2235 	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
2236 	ndmas = dma_cntl_poolp->ndmas;
2237 
2238 	for (i = 0; i < ndmas; i++) {
2239 		hxge_free_tx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
2240 	}
2241 
2242 	for (i = 0; i < ndmas; i++) {
2243 		hxge_free_tx_cntl_dma(hxgep, dma_cntl_p[i]);
2244 	}
2245 
2246 	for (i = 0; i < ndmas; i++) {
2247 		KMEM_FREE(dma_buf_p[i],
2248 		    sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2249 		KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t));
2250 	}
2251 
2252 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2253 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2254 	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2255 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2256 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2257 
2258 	hxgep->tx_buf_pool_p = NULL;
2259 	hxgep->tx_cntl_pool_p = NULL;
2260 
2261 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_free_tx_mem_pool"));
2262 }
2263 
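/*
 * hxge_dma_mem_alloc() -- allocate and bind one DDI DMA area.
 *
 * Allocates a DMA handle, allocates the memory and binds it; the
 * binding must resolve to a single cookie.  On any failure the
 * partially created handles are released before returning, so callers
 * only need to check the returned status.
 */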
2264 /*ARGSUSED*/
2265 static hxge_status_t
2266 hxge_dma_mem_alloc(p_hxge_t hxgep, dma_method_t method,
2267     struct ddi_dma_attr *dma_attrp,
2268     size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
2269     p_hxge_dma_common_t dma_p)
2270 {
2271 	caddr_t		kaddrp;
2272 	int		ddi_status = DDI_SUCCESS;
2273 
2274 	dma_p->dma_handle = NULL;
2275 	dma_p->acc_handle = NULL;
2276 	dma_p->kaddrp = NULL;
2277 
2278 	ddi_status = ddi_dma_alloc_handle(hxgep->dip, dma_attrp,
2279 	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
2280 	if (ddi_status != DDI_SUCCESS) {
2281 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2282 		    "hxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
2283 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2284 	}
2285 
2286 	ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, acc_attr_p,
2287 	    xfer_flags, DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
2288 	    &dma_p->acc_handle);
2289 	if (ddi_status != DDI_SUCCESS) {
2290 		/* The caller will decide whether it is fatal */
2291 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2292 		    "hxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
2293 		ddi_dma_free_handle(&dma_p->dma_handle);
2294 		dma_p->dma_handle = NULL;
2295 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2296 	}
2297 
2298 	if (dma_p->alength < length) {
2299 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2300 		    "hxge_dma_mem_alloc:ddi_dma_mem_alloc < length."));
2301 		ddi_dma_mem_free(&dma_p->acc_handle);
2302 		ddi_dma_free_handle(&dma_p->dma_handle);
2303 		dma_p->acc_handle = NULL;
2304 		dma_p->dma_handle = NULL;
2305 		return (HXGE_ERROR);
2306 	}
2307 
2308 	ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2309 	    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2310 	    &dma_p->dma_cookie, &dma_p->ncookies);
2311 	if (ddi_status != DDI_DMA_MAPPED) {
2312 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2313 		    "hxge_dma_mem_alloc:ddi_dma_addr_bind failed "
2314 		    "(status 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2315 		if (dma_p->acc_handle) {
2316 			ddi_dma_mem_free(&dma_p->acc_handle);
2317 			dma_p->acc_handle = NULL;
2318 		}
2319 		ddi_dma_free_handle(&dma_p->dma_handle);
2320 		dma_p->dma_handle = NULL;
2321 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2322 	}
2323 
2324 	if (dma_p->ncookies != 1) {
2325 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2326 		    "hxge_dma_mem_alloc:ddi_dma_addr_bind > 1 cookie "
2327 		    "(status 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2328 		if (dma_p->acc_handle) {
2329 			ddi_dma_mem_free(&dma_p->acc_handle);
2330 			dma_p->acc_handle = NULL;
2331 		}
2332 		(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2333 		ddi_dma_free_handle(&dma_p->dma_handle);
2334 		dma_p->dma_handle = NULL;
2335 		return (HXGE_ERROR);
2336 	}
2337 
2338 	dma_p->kaddrp = kaddrp;
2339 #if defined(__i386)
2340 	dma_p->ioaddr_pp =
2341 	    (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
2342 #else
2343 	dma_p->ioaddr_pp = (unsigned char *) dma_p->dma_cookie.dmac_laddress;
2344 #endif
2345 
2346 	HPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
2347 
2348 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_dma_mem_alloc: "
2349 	    "dma buffer allocated: dma_p $%p "
2350 	    "return dmac_laddress from cookie $%p dmac_size %d "
2351 	    "dma_p->ioaddr_p $%p "
2352 	    "dma_p->orig_ioaddr_p $%p "
2353 	    "orig_vatopa $%p "
2354 	    "alength %d (0x%x) "
2355 	    "kaddrp $%p "
2356 	    "length %d (0x%x)",
2357 	    dma_p,
2358 	    dma_p->dma_cookie.dmac_laddress,
2359 	    dma_p->dma_cookie.dmac_size,
2360 	    dma_p->ioaddr_pp,
2361 	    dma_p->orig_ioaddr_pp,
2362 	    dma_p->orig_vatopa,
2363 	    dma_p->alength, dma_p->alength,
2364 	    kaddrp,
2365 	    length, length));
2366 
2367 	return (HXGE_OK);
2368 }
2369 
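/*
 * hxge_dma_mem_free() -- undo hxge_dma_mem_alloc(): unbind the handle,
 * free the memory and the handle, and clear the hxge_dma_common_t
 * fields.
 */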
2370 static void
2371 hxge_dma_mem_free(p_hxge_dma_common_t dma_p)
2372 {
2373 	if (dma_p == NULL)
2374 		return;
2375 
2376 	if (dma_p->dma_handle != NULL) {
2377 		if (dma_p->ncookies) {
2378 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2379 			dma_p->ncookies = 0;
2380 		}
2381 		ddi_dma_free_handle(&dma_p->dma_handle);
2382 		dma_p->dma_handle = NULL;
2383 	}
2384 
2385 	if (dma_p->acc_handle != NULL) {
2386 		ddi_dma_mem_free(&dma_p->acc_handle);
2387 		dma_p->acc_handle = NULL;
2388 		HPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
2389 	}
2390 
2391 	dma_p->kaddrp = NULL;
2392 	dma_p->alength = 0;
2393 }
2394 
2395 /*
2396  *	hxge_m_start() -- start transmitting and receiving.
2397  *
2398  *	This function is called by the MAC layer when the first
2399  *	stream is opened, to prepare the hardware for sending
2400  *	and receiving packets.
2401  */
2402 static int
2403 hxge_m_start(void *arg)
2404 {
2405 	p_hxge_t hxgep = (p_hxge_t)arg;
2406 
2407 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_start"));
2408 
2409 	MUTEX_ENTER(hxgep->genlock);
2410 
2411 	if (hxge_init(hxgep) != DDI_SUCCESS) {
2412 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2413 		    "<== hxge_m_start: initialization failed"));
2414 		MUTEX_EXIT(hxgep->genlock);
2415 		return (EIO);
2416 	}
2417 
2418 	if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) {
2419 		/*
2420 		 * Start a timer to check for system errors and tx hangs.
2421 		 */
2422 		hxgep->hxge_timerid = hxge_start_timer(hxgep,
2423 		    hxge_check_hw_state, HXGE_CHECK_TIMER);
2424 
2425 		hxgep->hxge_mac_state = HXGE_MAC_STARTED;
2426 
2427 		hxgep->timeout.link_status = 0;
2428 		hxgep->timeout.report_link_status = B_TRUE;
2429 		hxgep->timeout.ticks = drv_usectohz(2 * 1000000);
2430 
2431 		/* Start the link status timer to check the link status */
2432 		MUTEX_ENTER(&hxgep->timeout.lock);
2433 		hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
2434 		    hxgep->timeout.ticks);
2435 		MUTEX_EXIT(&hxgep->timeout.lock);
2436 	}
2437 
2438 	MUTEX_EXIT(hxgep->genlock);
2439 
2440 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_start"));
2441 
2442 	return (0);
2443 }
2444 
2445 /*
2446  * hxge_m_stop(): stop transmitting and receiving.
2447  */
2448 static void
2449 hxge_m_stop(void *arg)
2450 {
2451 	p_hxge_t hxgep = (p_hxge_t)arg;
2452 
2453 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_stop"));
2454 
2455 	if (hxgep->hxge_timerid) {
2456 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
2457 		hxgep->hxge_timerid = 0;
2458 	}
2459 
2460 	/* Stop the link status timer before unregistering */
2461 	MUTEX_ENTER(&hxgep->timeout.lock);
2462 	if (hxgep->timeout.id) {
2463 		(void) untimeout(hxgep->timeout.id);
2464 		hxgep->timeout.id = 0;
2465 	}
2466 	hxge_link_update(hxgep, LINK_STATE_DOWN);
2467 	MUTEX_EXIT(&hxgep->timeout.lock);
2468 
2469 	MUTEX_ENTER(hxgep->genlock);
2470 
2471 	hxge_uninit(hxgep);
2472 
2473 	hxgep->hxge_mac_state = HXGE_MAC_STOPPED;
2474 
2475 	MUTEX_EXIT(hxgep->genlock);
2476 
2477 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_stop"));
2478 }
2479 
2480 static int
2481 hxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
2482 {
2483 	p_hxge_t		hxgep = (p_hxge_t)arg;
2484 	struct ether_addr	addrp;
2485 
2486 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_multicst: add %d", add));
2487 
2488 	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
2489 
2490 	if (add) {
2491 		if (hxge_add_mcast_addr(hxgep, &addrp)) {
2492 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2493 			    "<== hxge_m_multicst: add multicast failed"));
2494 			return (EINVAL);
2495 		}
2496 	} else {
2497 		if (hxge_del_mcast_addr(hxgep, &addrp)) {
2498 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2499 			    "<== hxge_m_multicst: del multicast failed"));
2500 			return (EINVAL);
2501 		}
2502 	}
2503 
2504 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_multicst"));
2505 
2506 	return (0);
2507 }
2508 
2509 static int
2510 hxge_m_promisc(void *arg, boolean_t on)
2511 {
2512 	p_hxge_t hxgep = (p_hxge_t)arg;
2513 
2514 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_promisc: on %d", on));
2515 
2516 	if (hxge_set_promisc(hxgep, on)) {
2517 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2518 		    "<== hxge_m_promisc: set promisc failed"));
2519 		return (EINVAL);
2520 	}
2521 
2522 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_promisc: on %d", on));
2523 
2524 	return (0);
2525 }
2526 
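/*
 * hxge_m_ioctl() -- GLDv3 ioctl entry point.
 *
 * Commands that may change device state require net_config privilege;
 * the request is then dispatched to the ndd, loopback, or hardware
 * ioctl handler.
 */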
2527 static void
2528 hxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2529 {
2530 	p_hxge_t	hxgep = (p_hxge_t)arg;
2531 	struct iocblk	*iocp = (struct iocblk *)mp->b_rptr;
2532 	boolean_t	need_privilege;
2533 	int		err;
2534 	int		cmd;
2535 
2536 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl"));
2537 
2538 	iocp = (struct iocblk *)mp->b_rptr;
2539 	iocp->ioc_error = 0;
2540 	need_privilege = B_TRUE;
2541 	cmd = iocp->ioc_cmd;
2542 
2543 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl: cmd 0x%08x", cmd));
2544 	switch (cmd) {
2545 	default:
2546 		miocnak(wq, mp, 0, EINVAL);
2547 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl: invalid"));
2548 		return;
2549 
2550 	case LB_GET_INFO_SIZE:
2551 	case LB_GET_INFO:
2552 	case LB_GET_MODE:
2553 		need_privilege = B_FALSE;
2554 		break;
2555 
2556 	case LB_SET_MODE:
2557 		break;
2558 
2559 	case ND_GET:
2560 		need_privilege = B_FALSE;
2561 		break;
2562 	case ND_SET:
2563 		break;
2564 
2565 	case HXGE_GET_TX_RING_SZ:
2566 	case HXGE_GET_TX_DESC:
2567 	case HXGE_TX_SIDE_RESET:
2568 	case HXGE_RX_SIDE_RESET:
2569 	case HXGE_GLOBAL_RESET:
2570 	case HXGE_RESET_MAC:
2571 	case HXGE_PUT_TCAM:
2572 	case HXGE_GET_TCAM:
2573 	case HXGE_RTRACE:
2574 
2575 		need_privilege = B_FALSE;
2576 		break;
2577 	}
2578 
2579 	if (need_privilege) {
2580 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
2581 		if (err != 0) {
2582 			miocnak(wq, mp, 0, err);
2583 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2584 			    "<== hxge_m_ioctl: no priv"));
2585 			return;
2586 		}
2587 	}
2588 
2589 	switch (cmd) {
2590 	case ND_GET:
2591 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_GET command"));
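		/* FALLTHROUGH */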
2592 	case ND_SET:
2593 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_SET command"));
2594 		hxge_param_ioctl(hxgep, wq, mp, iocp);
2595 		break;
2596 
2597 	case LB_GET_MODE:
2598 	case LB_SET_MODE:
2599 	case LB_GET_INFO_SIZE:
2600 	case LB_GET_INFO:
2601 		hxge_loopback_ioctl(hxgep, wq, mp, iocp);
2602 		break;
2603 
2604 	case HXGE_PUT_TCAM:
2605 	case HXGE_GET_TCAM:
2606 	case HXGE_GET_TX_RING_SZ:
2607 	case HXGE_GET_TX_DESC:
2608 	case HXGE_TX_SIDE_RESET:
2609 	case HXGE_RX_SIDE_RESET:
2610 	case HXGE_GLOBAL_RESET:
2611 	case HXGE_RESET_MAC:
2612 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL,
2613 		    "==> hxge_m_ioctl: cmd 0x%x", cmd));
2614 		hxge_hw_ioctl(hxgep, wq, mp, iocp);
2615 		break;
2616 	}
2617 
2618 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl"));
2619 }
2620 
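/*
 * hxge_tx_ring_start()/hxge_tx_ring_stop() -- GLDv3 per-ring start and
 * stop entry points for transmit rings; they record or clear the MAC
 * ring handle under the ring lock.
 */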
2621 /*ARGSUSED*/
2622 static int
2623 hxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
2624 {
2625 	p_hxge_ring_handle_t	rhp = (p_hxge_ring_handle_t)rdriver;
2626 	p_hxge_t		hxgep;
2627 	p_tx_ring_t		ring;
2628 
2629 	ASSERT(rhp != NULL);
2630 	ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_TDCS));
2631 
2632 	hxgep = rhp->hxgep;
2633 
2634 	/*
2635 	 * Get the ring pointer.
2636 	 */
2637 	ring = hxgep->tx_rings->rings[rhp->index];
2638 
2639 	/*
2640 	 * Fill in the handle for the transmit.
2641 	 */
2642 	MUTEX_ENTER(&ring->lock);
2643 	rhp->started = B_TRUE;
2644 	ring->ring_handle = rhp->ring_handle;
2645 	MUTEX_EXIT(&ring->lock);
2646 
2647 	return (0);
2648 }
2649 
2650 static void
2651 hxge_tx_ring_stop(mac_ring_driver_t rdriver)
2652 {
2653 	p_hxge_ring_handle_t    rhp = (p_hxge_ring_handle_t)rdriver;
2654 	p_hxge_t		hxgep;
2655 	p_tx_ring_t		ring;
2656 
2657 	ASSERT(rhp != NULL);
2658 	ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_TDCS));
2659 
2660 	hxgep = rhp->hxgep;
2661 	ring = hxgep->tx_rings->rings[rhp->index];
2662 
2663 	MUTEX_ENTER(&ring->lock);
2664 	ring->ring_handle = (mac_ring_handle_t)NULL;
2665 	rhp->started = B_FALSE;
2666 	MUTEX_EXIT(&ring->lock);
2667 }
2668 
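/*
 * hxge_rx_ring_start() -- GLDv3 per-ring start entry point for receive
 * rings.  Caches the logical device (ldvp/ldgp) pointers used to switch
 * the ring between interrupt and polling mode, and records the MAC ring
 * handle and generation number.
 */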
2669 static int
2670 hxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
2671 {
2672 	p_hxge_ring_handle_t	rhp = (p_hxge_ring_handle_t)rdriver;
2673 	p_hxge_t		hxgep;
2674 	p_rx_rcr_ring_t		ring;
2675 	int			i;
2676 
2677 	ASSERT(rhp != NULL);
2678 	ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_RDCS));
2679 
2680 	hxgep = rhp->hxgep;
2681 
2682 	/*
2683 	 * Get pointer to ring.
2684 	 */
2685 	ring = hxgep->rx_rcr_rings->rcr_rings[rhp->index];
2686 
2687 	MUTEX_ENTER(&ring->lock);
2688 
2689 	if (rhp->started) {
2690 		MUTEX_EXIT(&ring->lock);
2691 		return (0);
2692 	}
2693 
2694 	/*
2695 	 * Set the ldvp and ldgp pointers to enable/disable
2696 	 * polling.
2697 	 */
2698 	for (i = 0; i < hxgep->ldgvp->maxldvs; i++) {
2699 		if ((hxgep->ldgvp->ldvp[i].is_rxdma == 1) &&
2700 		    (hxgep->ldgvp->ldvp[i].channel == rhp->index)) {
2701 			ring->ldvp = &hxgep->ldgvp->ldvp[i];
2702 			ring->ldgp = hxgep->ldgvp->ldvp[i].ldgp;
2703 			break;
2704 		}
2705 	}
2706 
2707 	rhp->started = B_TRUE;
2708 	ring->rcr_mac_handle = rhp->ring_handle;
2709 	ring->rcr_gen_num = mr_gen_num;
2710 	MUTEX_EXIT(&ring->lock);
2711 
2712 	return (0);
2713 }
2714 
2715 static void
2716 hxge_rx_ring_stop(mac_ring_driver_t rdriver)
2717 {
2718 	p_hxge_ring_handle_t	rhp = (p_hxge_ring_handle_t)rdriver;
2719 	p_hxge_t		hxgep;
2720 	p_rx_rcr_ring_t		ring;
2721 
2722 	ASSERT(rhp != NULL);
2723 	ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_RDCS));
2724 
2725 	hxgep = rhp->hxgep;
2726 	ring =  hxgep->rx_rcr_rings->rcr_rings[rhp->index];
2727 
2728 	MUTEX_ENTER(&ring->lock);
2729 	rhp->started = B_FALSE;
2730 	ring->rcr_mac_handle = NULL;
2731 	ring->ldvp = NULL;
2732 	ring->ldgp = NULL;
2733 	MUTEX_EXIT(&ring->lock);
2734 }
2735 
2736 static int
2737 hxge_rx_group_start(mac_group_driver_t gdriver)
2738 {
2739 	hxge_ring_group_t	*group = (hxge_ring_group_t *)gdriver;
2740 
2741 	ASSERT(group->hxgep != NULL);
2742 	ASSERT(group->hxgep->hxge_mac_state == HXGE_MAC_STARTED);
2743 
2744 	MUTEX_ENTER(group->hxgep->genlock);
2745 	group->started = B_TRUE;
2746 	MUTEX_EXIT(group->hxgep->genlock);
2747 
2748 	return (0);
2749 }
2750 
2751 static void
2752 hxge_rx_group_stop(mac_group_driver_t gdriver)
2753 {
2754 	hxge_ring_group_t	*group = (hxge_ring_group_t *)gdriver;
2755 
2756 	ASSERT(group->hxgep != NULL);
2757 	ASSERT(group->hxgep->hxge_mac_state == HXGE_MAC_STARTED);
2758 	ASSERT(group->started == B_TRUE);
2759 
2760 	MUTEX_ENTER(group->hxgep->genlock);
2761 	group->started = B_FALSE;
2762 	MUTEX_EXIT(group->hxgep->genlock);
2763 }
2764 
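/*
 * hxge_mmac_get_slot(), hxge_mmac_set_addr(), hxge_mmac_find_addr() and
 * hxge_mmac_unset_addr() manage the table of alternate MAC addresses
 * used by the hxge_rx_group_add_mac()/hxge_rx_group_rem_mac() entry
 * points below.
 */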
2765 static int
2766 hxge_mmac_get_slot(p_hxge_t hxgep, int *slot)
2767 {
2768 	int	i;
2769 
2770 	/*
2771 	 * Find an open slot.
2772 	 */
2773 	for (i = 0; i < hxgep->mmac.total; i++) {
2774 		if (!hxgep->mmac.addrs[i].set) {
2775 			*slot = i;
2776 			return (0);
2777 		}
2778 	}
2779 
2780 	return (ENXIO);
2781 }
2782 
2783 static int
2784 hxge_mmac_set_addr(p_hxge_t hxgep, int slot, const uint8_t *addr)
2785 {
2786 	struct ether_addr	eaddr;
2787 	hxge_status_t		status = HXGE_OK;
2788 
2789 	bcopy(addr, (uint8_t *)&eaddr, ETHERADDRL);
2790 
2791 	/*
2792 	 * Set new interface local address and re-init device.
2793 	 * This is destructive to any other streams attached
2794 	 * to this device.
2795 	 */
2796 	RW_ENTER_WRITER(&hxgep->filter_lock);
2797 	status = hxge_pfc_set_mac_address(hxgep, slot, &eaddr);
2798 	RW_EXIT(&hxgep->filter_lock);
2799 	if (status != HXGE_OK)
2800 		return (status);
2801 
2802 	hxgep->mmac.addrs[slot].set = B_TRUE;
2803 	bcopy(addr, hxgep->mmac.addrs[slot].addr, ETHERADDRL);
2804 	hxgep->mmac.available--;
2805 	if (slot == HXGE_MAC_DEFAULT_ADDR_SLOT)
2806 		hxgep->mmac.addrs[slot].primary = B_TRUE;
2807 
2808 	return (0);
2809 }
2810 
2811 static int
2812 hxge_mmac_find_addr(p_hxge_t hxgep, const uint8_t *addr, int *slot)
2813 {
2814 	int	i, result;
2815 
2816 	for (i = 0; i < hxgep->mmac.total; i++) {
2817 		if (hxgep->mmac.addrs[i].set) {
2818 			result = memcmp(hxgep->mmac.addrs[i].addr,
2819 			    addr, ETHERADDRL);
2820 			if (result == 0) {
2821 				*slot = i;
2822 				return (0);
2823 			}
2824 		}
2825 	}
2826 
2827 	return (EINVAL);
2828 }
2829 
2830 static int
2831 hxge_mmac_unset_addr(p_hxge_t hxgep, int slot)
2832 {
2833 	hxge_status_t	status;
2834 	int		i;
2835 
2836 	status = hxge_pfc_clear_mac_address(hxgep, slot);
2837 	if (status != HXGE_OK)
2838 		return (status);
2839 
2840 	for (i = 0; i < ETHERADDRL; i++)
2841 		hxgep->mmac.addrs[slot].addr[i] = 0;
2842 
2843 	hxgep->mmac.addrs[slot].set = B_FALSE;
2844 	if (slot == HXGE_MAC_DEFAULT_ADDR_SLOT)
2845 		hxgep->mmac.addrs[slot].primary = B_FALSE;
2846 	hxgep->mmac.available++;
2847 
2848 	return (0);
2849 }
2850 
2851 static int
2852 hxge_rx_group_add_mac(void *arg, const uint8_t *mac_addr)
2853 {
2854 	hxge_ring_group_t	*group = arg;
2855 	p_hxge_t		hxgep = group->hxgep;
2856 	int			slot = 0;
2857 
2858 	ASSERT(group->type == MAC_RING_TYPE_RX);
2859 
2860 	MUTEX_ENTER(hxgep->genlock);
2861 
2862 	/*
2863 	 * Find a slot for the address.
2864 	 */
2865 	if (hxge_mmac_get_slot(hxgep, &slot) != 0) {
2866 		MUTEX_EXIT(hxgep->genlock);
2867 		return (ENOSPC);
2868 	}
2869 
2870 	/*
2871 	 * Program the MAC address.
2872 	 */
2873 	if (hxge_mmac_set_addr(hxgep, slot, mac_addr) != 0) {
2874 		MUTEX_EXIT(hxgep->genlock);
2875 		return (ENOSPC);
2876 	}
2877 
2878 	MUTEX_EXIT(hxgep->genlock);
2879 	return (0);
2880 }
2881 
2882 static int
2883 hxge_rx_group_rem_mac(void *arg, const uint8_t *mac_addr)
2884 {
2885 	hxge_ring_group_t	*group = arg;
2886 	p_hxge_t		hxgep = group->hxgep;
2887 	int			rv, slot;
2888 
2889 	ASSERT(group->type == MAC_RING_TYPE_RX);
2890 
2891 	MUTEX_ENTER(hxgep->genlock);
2892 
2893 	if ((rv = hxge_mmac_find_addr(hxgep, mac_addr, &slot)) != 0) {
2894 		MUTEX_EXIT(hxgep->genlock);
2895 		return (rv);
2896 	}
2897 
2898 	if ((rv = hxge_mmac_unset_addr(hxgep, slot)) != 0) {
2899 		MUTEX_EXIT(hxgep->genlock);
2900 		return (rv);
2901 	}
2902 
2903 	MUTEX_EXIT(hxgep->genlock);
2904 	return (0);
2905 }
2906 
2907 static void
2908 hxge_group_get(void *arg, mac_ring_type_t type, int groupid,
2909     mac_group_info_t *infop, mac_group_handle_t gh)
2910 {
2911 	p_hxge_t		hxgep = arg;
2912 	hxge_ring_group_t	*group;
2913 
2914 	ASSERT(type == MAC_RING_TYPE_RX);
2915 
2916 	switch (type) {
2917 	case MAC_RING_TYPE_RX:
2918 		group = &hxgep->rx_groups[groupid];
2919 		group->hxgep = hxgep;
2920 		group->ghandle = gh;
2921 		group->index = groupid;
2922 		group->type = type;
2923 
2924 		infop->mgi_driver = (mac_group_driver_t)group;
2925 		infop->mgi_start = hxge_rx_group_start;
2926 		infop->mgi_stop = hxge_rx_group_stop;
2927 		infop->mgi_addmac = hxge_rx_group_add_mac;
2928 		infop->mgi_remmac = hxge_rx_group_rem_mac;
2929 		infop->mgi_count = HXGE_MAX_RDCS;
2930 		break;
2931 
2932 	case MAC_RING_TYPE_TX:
2933 	default:
2934 		break;
2935 	}
2936 }
2937 
2938 /*
2939  * Callback function for the GLDv3 layer to register all rings.
2940  */
2941 /*ARGSUSED*/
2942 static void
2943 hxge_fill_ring(void *arg, mac_ring_type_t type, const int rg_index,
2944     const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
2945 {
2946 	p_hxge_t	hxgep = arg;
2947 
2948 	switch (type) {
2949 	case MAC_RING_TYPE_TX: {
2950 		p_hxge_ring_handle_t	rhp;
2951 
2952 		ASSERT((index >= 0) && (index < HXGE_MAX_TDCS));
2953 		rhp = &hxgep->tx_ring_handles[index];
2954 		rhp->hxgep = hxgep;
2955 		rhp->index = index;
2956 		rhp->ring_handle = rh;
2957 		infop->mri_driver = (mac_ring_driver_t)rhp;
2958 		infop->mri_start = hxge_tx_ring_start;
2959 		infop->mri_stop = hxge_tx_ring_stop;
2960 		infop->mri_tx = hxge_tx_ring_send;
2961 		break;
2962 	}
2963 	case MAC_RING_TYPE_RX: {
2964 		p_hxge_ring_handle_t    rhp;
2965 		mac_intr_t		hxge_mac_intr;
2966 
2967 		ASSERT((index >= 0) && (index < HXGE_MAX_RDCS));
2968 		rhp = &hxgep->rx_ring_handles[index];
2969 		rhp->hxgep = hxgep;
2970 		rhp->index = index;
2971 		rhp->ring_handle = rh;
2972 
2973 		/*
2974 		 * Entrypoint to enable interrupt (disable poll) and
2975 		 * disable interrupt (enable poll).
2976 		 */
2977 		hxge_mac_intr.mi_handle = (mac_intr_handle_t)rhp;
2978 		hxge_mac_intr.mi_enable =
2979 		    (mac_intr_enable_t)hxge_disable_poll;
2980 		hxge_mac_intr.mi_disable =
2981 		    (mac_intr_disable_t)hxge_enable_poll;
2982 		infop->mri_driver = (mac_ring_driver_t)rhp;
2983 		infop->mri_start = hxge_rx_ring_start;
2984 		infop->mri_stop = hxge_rx_ring_stop;
2985 		infop->mri_intr = hxge_mac_intr;
2986 		infop->mri_poll = hxge_rx_poll;
2987 		break;
2988 	}
2989 	default:
2990 		break;
2991 	}
2992 }
2993 
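/*
 * hxge_m_getcapab() -- report driver capabilities to the MAC layer:
 * partial hardware checksum offload and the static RX/TX ring and
 * group layout.
 */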
2994 /*ARGSUSED*/
2995 boolean_t
2996 hxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2997 {
2998 	p_hxge_t	hxgep = arg;
2999 
3000 	switch (cap) {
3001 	case MAC_CAPAB_HCKSUM: {
3002 		uint32_t	*txflags = cap_data;
3003 
3004 		*txflags = HCKSUM_INET_PARTIAL;
3005 		break;
3006 	}
3007 
3008 	case MAC_CAPAB_RINGS: {
3009 		mac_capab_rings_t	*cap_rings = cap_data;
3010 
3011 		MUTEX_ENTER(hxgep->genlock);
3012 		if (cap_rings->mr_type == MAC_RING_TYPE_RX) {
3013 			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
3014 			cap_rings->mr_rnum = HXGE_MAX_RDCS;
3015 			cap_rings->mr_rget = hxge_fill_ring;
3016 			cap_rings->mr_gnum = HXGE_MAX_RX_GROUPS;
3017 			cap_rings->mr_gget = hxge_group_get;
3018 			cap_rings->mr_gaddring = NULL;
3019 			cap_rings->mr_gremring = NULL;
3020 		} else {
3021 			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
3022 			cap_rings->mr_rnum = HXGE_MAX_TDCS;
3023 			cap_rings->mr_rget = hxge_fill_ring;
3024 			cap_rings->mr_gnum = 0;
3025 			cap_rings->mr_gget = NULL;
3026 			cap_rings->mr_gaddring = NULL;
3027 			cap_rings->mr_gremring = NULL;
3028 		}
3029 		MUTEX_EXIT(hxgep->genlock);
3030 		break;
3031 	}
3032 
3033 	default:
3034 		return (B_FALSE);
3035 	}
3036 	return (B_TRUE);
3037 }
3038 
3039 static boolean_t
3040 hxge_param_locked(mac_prop_id_t pr_num)
3041 {
3042 	/*
3043 	 * All adv_* parameters are locked (read-only) while
3044 	 * the device is in any sort of loopback mode ...
3045 	 */
3046 	switch (pr_num) {
3047 		case MAC_PROP_ADV_1000FDX_CAP:
3048 		case MAC_PROP_EN_1000FDX_CAP:
3049 		case MAC_PROP_ADV_1000HDX_CAP:
3050 		case MAC_PROP_EN_1000HDX_CAP:
3051 		case MAC_PROP_ADV_100FDX_CAP:
3052 		case MAC_PROP_EN_100FDX_CAP:
3053 		case MAC_PROP_ADV_100HDX_CAP:
3054 		case MAC_PROP_EN_100HDX_CAP:
3055 		case MAC_PROP_ADV_10FDX_CAP:
3056 		case MAC_PROP_EN_10FDX_CAP:
3057 		case MAC_PROP_ADV_10HDX_CAP:
3058 		case MAC_PROP_EN_10HDX_CAP:
3059 		case MAC_PROP_AUTONEG:
3060 		case MAC_PROP_FLOWCTRL:
3061 			return (B_TRUE);
3062 	}
3063 	return (B_FALSE);
3064 }
3065 
3066 /*
3067  * callback functions for set/get of properties
3068  */
3069 static int
3070 hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3071     uint_t pr_valsize, const void *pr_val)
3072 {
3073 	hxge_t		*hxgep = barg;
3074 	p_hxge_stats_t	statsp;
3075 	int		err = 0;
3076 	uint32_t	new_mtu, old_framesize, new_framesize;
3077 
3078 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "==> hxge_m_setprop"));
3079 
3080 	statsp = hxgep->statsp;
3081 	MUTEX_ENTER(hxgep->genlock);
3082 	if (statsp->port_stats.lb_mode != hxge_lb_normal &&
3083 	    hxge_param_locked(pr_num)) {
3084 		/*
3085 		 * All adv_* parameters are locked (read-only)
3086 		 * while the device is in any sort of loopback mode.
3087 		 */
3088 		HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3089 		    "==> hxge_m_setprop: loopback mode: read only"));
3090 		MUTEX_EXIT(hxgep->genlock);
3091 		return (EBUSY);
3092 	}
3093 
3094 	switch (pr_num) {
3095 		/*
3096 		 * These properties either do not exist or are read-only.
3097 		 */
3098 		case MAC_PROP_EN_1000FDX_CAP:
3099 		case MAC_PROP_EN_100FDX_CAP:
3100 		case MAC_PROP_EN_10FDX_CAP:
3101 		case MAC_PROP_EN_1000HDX_CAP:
3102 		case MAC_PROP_EN_100HDX_CAP:
3103 		case MAC_PROP_EN_10HDX_CAP:
3104 		case MAC_PROP_ADV_1000FDX_CAP:
3105 		case MAC_PROP_ADV_1000HDX_CAP:
3106 		case MAC_PROP_ADV_100FDX_CAP:
3107 		case MAC_PROP_ADV_100HDX_CAP:
3108 		case MAC_PROP_ADV_10FDX_CAP:
3109 		case MAC_PROP_ADV_10HDX_CAP:
3110 		case MAC_PROP_STATUS:
3111 		case MAC_PROP_SPEED:
3112 		case MAC_PROP_DUPLEX:
3113 		case MAC_PROP_AUTONEG:
3114 		/*
3115 		 * Flow control is handled in the shared domain and
3116 		 * it is read-only here.
3117 		 */
3118 		case MAC_PROP_FLOWCTRL:
3119 			err = EINVAL;
3120 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3121 			    "==> hxge_m_setprop:  read only property %d",
3122 			    pr_num));
3123 			break;
3124 
3125 		case MAC_PROP_MTU:
3126 			bcopy(pr_val, &new_mtu, sizeof (new_mtu));
3127 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3128 			    "==> hxge_m_setprop: set MTU: %d", new_mtu));
3129 
3130 			new_framesize = new_mtu + MTU_TO_FRAME_SIZE;
3131 			if (new_framesize == hxgep->vmac.maxframesize) {
3132 				err = 0;
3133 				break;
3134 			}
3135 
3136 			if (hxgep->hxge_mac_state == HXGE_MAC_STARTED) {
3137 				err = EBUSY;
3138 				break;
3139 			}
3140 
3141 			if (new_framesize < MIN_FRAME_SIZE ||
3142 			    new_framesize > MAX_FRAME_SIZE) {
3143 				err = EINVAL;
3144 				break;
3145 			}
3146 
3147 			old_framesize = hxgep->vmac.maxframesize;
3148 			hxgep->vmac.maxframesize = (uint16_t)new_framesize;
3149 
3150 			if (hxge_vmac_set_framesize(hxgep)) {
3151 				hxgep->vmac.maxframesize =
3152 				    (uint16_t)old_framesize;
3153 				err = EINVAL;
3154 				break;
3155 			}
3156 
3157 			err = mac_maxsdu_update(hxgep->mach, new_mtu);
3158 			if (err) {
3159 				hxgep->vmac.maxframesize =
3160 				    (uint16_t)old_framesize;
3161 				(void) hxge_vmac_set_framesize(hxgep);
3162 			}
3163 
3164 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3165 			    "==> hxge_m_setprop: set MTU: %d maxframe %d",
3166 			    new_mtu, hxgep->vmac.maxframesize));
3167 			break;
3168 
3169 		case MAC_PROP_PRIVATE:
3170 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3171 			    "==> hxge_m_setprop: private property"));
3172 			err = hxge_set_priv_prop(hxgep, pr_name, pr_valsize,
3173 			    pr_val);
3174 			break;
3175 
3176 		default:
3177 			err = ENOTSUP;
3178 			break;
3179 	}
3180 
3181 	MUTEX_EXIT(hxgep->genlock);
3182 
3183 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3184 	    "<== hxge_m_setprop (return %d)", err));
3185 
3186 	return (err);
3187 }
3188 
3189 /* ARGSUSED */
3190 static int
3191 hxge_get_def_val(hxge_t *hxgep, mac_prop_id_t pr_num, uint_t pr_valsize,
3192     void *pr_val)
3193 {
3194 	int		err = 0;
3195 	link_flowctrl_t	fl;
3196 
3197 	switch (pr_num) {
3198 	case MAC_PROP_DUPLEX:
3199 		*(uint8_t *)pr_val = 2;
3200 		break;
3201 	case MAC_PROP_AUTONEG:
3202 		*(uint8_t *)pr_val = 0;
3203 		break;
3204 	case MAC_PROP_FLOWCTRL:
3205 		if (pr_valsize < sizeof (link_flowctrl_t))
3206 			return (EINVAL);
3207 		fl = LINK_FLOWCTRL_TX;
3208 		bcopy(&fl, pr_val, sizeof (fl));
3209 		break;
3210 	default:
3211 		err = ENOTSUP;
3212 		break;
3213 	}
3214 	return (err);
3215 }
3216 
3217 static int
3218 hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3219     uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
3220 {
3221 	hxge_t 		*hxgep = barg;
3222 	p_hxge_stats_t	statsp = hxgep->statsp;
3223 	int		err = 0;
3224 	link_flowctrl_t fl;
3225 	uint64_t	tmp = 0;
3226 	link_state_t	ls;
3227 
3228 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3229 	    "==> hxge_m_getprop: pr_num %d", pr_num));
3230 
3231 	if (pr_valsize == 0)
3232 		return (EINVAL);
3233 
3234 	*perm = MAC_PROP_PERM_RW;
3235 
3236 	if ((pr_flags & MAC_PROP_DEFAULT) && (pr_num != MAC_PROP_PRIVATE)) {
3237 		err = hxge_get_def_val(hxgep, pr_num, pr_valsize, pr_val);
3238 		return (err);
3239 	}
3240 
3241 	bzero(pr_val, pr_valsize);
3242 	switch (pr_num) {
3243 		case MAC_PROP_DUPLEX:
3244 			*perm = MAC_PROP_PERM_READ;
3245 			*(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
3246 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3247 			    "==> hxge_m_getprop: duplex mode %d",
3248 			    *(uint8_t *)pr_val));
3249 			break;
3250 
3251 		case MAC_PROP_SPEED:
3252 			*perm = MAC_PROP_PERM_READ;
3253 			if (pr_valsize < sizeof (uint64_t))
3254 				return (EINVAL);
3255 			tmp = statsp->mac_stats.link_speed * 1000000ull;
3256 			bcopy(&tmp, pr_val, sizeof (tmp));
3257 			break;
3258 
3259 		case MAC_PROP_STATUS:
3260 			*perm = MAC_PROP_PERM_READ;
3261 			if (pr_valsize < sizeof (link_state_t))
3262 				return (EINVAL);
3263 			if (!statsp->mac_stats.link_up)
3264 				ls = LINK_STATE_DOWN;
3265 			else
3266 				ls = LINK_STATE_UP;
3267 			bcopy(&ls, pr_val, sizeof (ls));
3268 			break;
3269 
3270 		case MAC_PROP_FLOWCTRL:
3271 			/*
3272 			 * Flow control is supported by the shared domain and
3273 			 * it is currently transmit only
3274 			 */
3275 			*perm = MAC_PROP_PERM_READ;
3276 			if (pr_valsize < sizeof (link_flowctrl_t))
3277 				return (EINVAL);
3278 			fl = LINK_FLOWCTRL_TX;
3279 			bcopy(&fl, pr_val, sizeof (fl));
3280 			break;
3281 		case MAC_PROP_AUTONEG:
3282 			/* 10G link only and it is not negotiable */
3283 			*perm = MAC_PROP_PERM_READ;
3284 			*(uint8_t *)pr_val = 0;
3285 			break;
3286 		case MAC_PROP_ADV_1000FDX_CAP:
3287 		case MAC_PROP_ADV_100FDX_CAP:
3288 		case MAC_PROP_ADV_10FDX_CAP:
3289 		case MAC_PROP_ADV_1000HDX_CAP:
3290 		case MAC_PROP_ADV_100HDX_CAP:
3291 		case MAC_PROP_ADV_10HDX_CAP:
3292 		case MAC_PROP_EN_1000FDX_CAP:
3293 		case MAC_PROP_EN_100FDX_CAP:
3294 		case MAC_PROP_EN_10FDX_CAP:
3295 		case MAC_PROP_EN_1000HDX_CAP:
3296 		case MAC_PROP_EN_100HDX_CAP:
3297 		case MAC_PROP_EN_10HDX_CAP:
3298 			err = ENOTSUP;
3299 			break;
3300 
3301 		case MAC_PROP_PRIVATE:
3302 			err = hxge_get_priv_prop(hxgep, pr_name, pr_flags,
3303 			    pr_valsize, pr_val);
3304 			break;
3305 		case MAC_PROP_MTU: {
3306 			mac_propval_range_t range;
3307 
3308 			if (!(pr_flags & MAC_PROP_POSSIBLE))
3309 				return (ENOTSUP);
3310 			if (pr_valsize < sizeof (mac_propval_range_t))
3311 				return (EINVAL);
3312 			range.mpr_count = 1;
3313 			range.mpr_type = MAC_PROPVAL_UINT32;
3314 			range.range_uint32[0].mpur_min = MIN_FRAME_SIZE -
3315 			    MTU_TO_FRAME_SIZE;
3316 			range.range_uint32[0].mpur_max = MAX_FRAME_SIZE -
3317 			    MTU_TO_FRAME_SIZE;
3318 			bcopy(&range, pr_val, sizeof (range));
3319 			break;
3320 		}
3321 		default:
3322 			err = EINVAL;
3323 			break;
3324 	}
3325 
3326 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "<== hxge_m_getprop"));
3327 
3328 	return (err);
3329 }
3330 
3331 /* ARGSUSED */
3332 static int
3333 hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_valsize,
3334     const void *pr_val)
3335 {
3336 	p_hxge_param_t	param_arr = hxgep->param_arr;
3337 	int		err = 0;
3338 
3339 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3340 	    "==> hxge_set_priv_prop: name %s (value %s)", pr_name, pr_val));
3341 
3342 	if (pr_val == NULL) {
3343 		return (EINVAL);
3344 	}
3345 
3346 	/* Blanking */
3347 	if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3348 		err = hxge_param_rx_intr_time(hxgep, NULL, NULL,
3349 		    (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_time]);
3350 	} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3351 		err = hxge_param_rx_intr_pkts(hxgep, NULL, NULL,
3352 		    (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_pkts]);
3353 
3354 	/* Classification */
3355 	} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3356 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3357 		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3358 	} else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3359 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3360 		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3361 	} else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3362 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3363 		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3364 	} else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3365 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3366 		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3367 	} else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3368 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3369 		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3370 	} else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3371 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3372 		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3373 	} else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3374 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3375 		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3376 	} else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3377 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3378 		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3379 	} else {
3380 		err = EINVAL;
3381 	}
3382 
3383 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3384 	    "<== hxge_set_priv_prop: err %d", err));
3385 
3386 	return (err);
3387 }
3388 
3389 static int
3390 hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_flags,
3391     uint_t pr_valsize, void *pr_val)
3392 {
3393 	p_hxge_param_t	param_arr = hxgep->param_arr;
3394 	char		valstr[MAXNAMELEN];
3395 	int		err = 0;
3396 	uint_t		strsize;
3397 	int		value = 0;
3398 
3399 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3400 	    "==> hxge_get_priv_prop: property %s", pr_name));
3401 
3402 	if (pr_flags & MAC_PROP_DEFAULT) {
3403 		/* Receive Interrupt Blanking Parameters */
3404 		if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3405 			value = RXDMA_RCR_TO_DEFAULT;
3406 		} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3407 			value = RXDMA_RCR_PTHRES_DEFAULT;
3408 
3409 		/* Classification and Load Distribution Configuration */
3410 		} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0 ||
3411 		    strcmp(pr_name, "_class_opt_ipv4_udp") == 0 ||
3412 		    strcmp(pr_name, "_class_opt_ipv4_ah") == 0 ||
3413 		    strcmp(pr_name, "_class_opt_ipv4_sctp") == 0 ||
3414 		    strcmp(pr_name, "_class_opt_ipv6_tcp") == 0 ||
3415 		    strcmp(pr_name, "_class_opt_ipv6_udp") == 0 ||
3416 		    strcmp(pr_name, "_class_opt_ipv6_ah") == 0 ||
3417 		    strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3418 			value = HXGE_CLASS_TCAM_LOOKUP;
3419 		} else {
3420 			err = EINVAL;
3421 		}
3422 	} else {
3423 		/* Receive Interrupt Blanking Parameters */
3424 		if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3425 			value = hxgep->intr_timeout;
3426 		} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3427 			value = hxgep->intr_threshold;
3428 
3429 		/* Classification and Load Distribution Configuration */
3430 		} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3431 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3432 			    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3433 
3434 			value = (int)param_arr[param_class_opt_ipv4_tcp].value;
3435 		} else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3436 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3437 			    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3438 
3439 			value = (int)param_arr[param_class_opt_ipv4_udp].value;
3440 		} else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3441 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3442 			    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3443 
3444 			value = (int)param_arr[param_class_opt_ipv4_ah].value;
3445 		} else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3446 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3447 			    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3448 
3449 			value = (int)param_arr[param_class_opt_ipv4_sctp].value;
3450 		} else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3451 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3452 			    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3453 
3454 			value = (int)param_arr[param_class_opt_ipv6_tcp].value;
3455 		} else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3456 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3457 			    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3458 
3459 			value = (int)param_arr[param_class_opt_ipv6_udp].value;
3460 		} else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3461 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3462 			    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3463 
3464 			value = (int)param_arr[param_class_opt_ipv6_ah].value;
3465 		} else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3466 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3467 			    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3468 
3469 			value = (int)param_arr[param_class_opt_ipv6_sctp].value;
3470 		} else {
3471 			err = EINVAL;
3472 		}
3473 	}
3474 
3475 	if (err == 0) {
3476 		(void) snprintf(valstr, sizeof (valstr), "0x%x", value);
3477 
3478 		strsize = (uint_t)strlen(valstr);
3479 		if (pr_valsize < strsize) {
3480 			err = ENOBUFS;
3481 		} else {
3482 			(void) strlcpy(pr_val, valstr, pr_valsize);
3483 		}
3484 	}
3485 
3486 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3487 	    "<== hxge_get_priv_prop: return %d", err));
3488 
3489 	return (err);
3490 }
3491 /*
3492  * Module loading and removing entry points.
3493  */
3494 DDI_DEFINE_STREAM_OPS(hxge_dev_ops, nulldev, nulldev, hxge_attach, hxge_detach,
3495     nodev, NULL, D_MP, NULL, NULL);
3496 
3497 extern struct mod_ops mod_driverops;
3498 
3499 #define	HXGE_DESC_VER	"HXGE 10Gb Ethernet Driver"
3500 
3501 /*
3502  * Module linkage information for the kernel.
3503  */
3504 static struct modldrv hxge_modldrv = {
3505 	&mod_driverops,
3506 	HXGE_DESC_VER,
3507 	&hxge_dev_ops
3508 };
3509 
3510 static struct modlinkage modlinkage = {
3511 	MODREV_1, (void *) &hxge_modldrv, NULL
3512 };
3513 
3514 int
3515 _init(void)
3516 {
3517 	int status;
3518 
3519 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
3520 	mac_init_ops(&hxge_dev_ops, "hxge");
3521 	status = ddi_soft_state_init(&hxge_list, sizeof (hxge_t), 0);
3522 	if (status != 0) {
3523 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
3524 		    "failed to init device soft state"));
3525 		mac_fini_ops(&hxge_dev_ops);
3526 		goto _init_exit;
3527 	}
3528 
3529 	status = mod_install(&modlinkage);
3530 	if (status != 0) {
3531 		ddi_soft_state_fini(&hxge_list);
3532 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "Mod install failed"));
3533 		goto _init_exit;
3534 	}
3535 
3536 	MUTEX_INIT(&hxge_common_lock, NULL, MUTEX_DRIVER, NULL);
3537 
3538 _init_exit:
3539 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
3540 
3541 	return (status);
3542 }
3543 
3544 int
3545 _fini(void)
3546 {
3547 	int status;
3548 
3549 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
3550 
3551 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
3552 
3553 	if (hxge_mblks_pending)
3554 		return (EBUSY);
3555 
3556 	status = mod_remove(&modlinkage);
3557 	if (status != DDI_SUCCESS) {
3558 		HXGE_DEBUG_MSG((NULL, MOD_CTL,
3559 		    "Module removal failed 0x%08x", status));
3560 		goto _fini_exit;
3561 	}
3562 
3563 	mac_fini_ops(&hxge_dev_ops);
3564 
3565 	ddi_soft_state_fini(&hxge_list);
3566 
3567 	MUTEX_DESTROY(&hxge_common_lock);
3568 
3569 _fini_exit:
3570 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));
3571 
3572 	return (status);
3573 }
3574 
3575 int
3576 _info(struct modinfo *modinfop)
3577 {
3578 	int status;
3579 
3580 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
3581 	status = mod_info(&modlinkage, modinfop);
3582 	HXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
3583 
3584 	return (status);
3585 }
3586 
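/*
 * hxge_add_intrs() -- discover the interrupt types supported by the
 * device and register interrupts, preferring MSI-X or MSI according to
 * the hxge_msi_enable tunable and falling back to fixed interrupts.
 */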
3587 /*ARGSUSED*/
3588 static hxge_status_t
3589 hxge_add_intrs(p_hxge_t hxgep)
3590 {
3591 	int		intr_types;
3592 	int		type = 0;
3593 	int		ddi_status = DDI_SUCCESS;
3594 	hxge_status_t	status = HXGE_OK;
3595 
3596 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs"));
3597 
3598 	hxgep->hxge_intr_type.intr_registered = B_FALSE;
3599 	hxgep->hxge_intr_type.intr_enabled = B_FALSE;
3600 	hxgep->hxge_intr_type.msi_intx_cnt = 0;
3601 	hxgep->hxge_intr_type.intr_added = 0;
3602 	hxgep->hxge_intr_type.niu_msi_enable = B_FALSE;
3603 	hxgep->hxge_intr_type.intr_type = 0;
3604 
3605 	if (hxge_msi_enable) {
3606 		hxgep->hxge_intr_type.niu_msi_enable = B_TRUE;
3607 	}
3608 
3609 	/* Get the supported interrupt types */
3610 	if ((ddi_status = ddi_intr_get_supported_types(hxgep->dip, &intr_types))
3611 	    != DDI_SUCCESS) {
3612 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_intrs: "
3613 		    "ddi_intr_get_supported_types failed: status 0x%08x",
3614 		    ddi_status));
3615 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3616 	}
3617 
3618 	hxgep->hxge_intr_type.intr_types = intr_types;
3619 
3620 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3621 	    "ddi_intr_get_supported_types: 0x%08x", intr_types));
3622 
3623 	/*
3624 	 * Pick the interrupt type to use based on hxge_msi_enable:
3625 	 *	1 - MSI
3626 	 *	2 - MSI-X
3627 	 *	others - FIXED (INTx emulation)
3628 	 */
3629 	switch (hxge_msi_enable) {
3630 	default:
3631 		type = DDI_INTR_TYPE_FIXED;
3632 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3633 		    "use fixed (intx emulation) type %08x", type));
3634 		break;
3635 
3636 	case 2:
3637 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3638 		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
3639 		if (intr_types & DDI_INTR_TYPE_MSIX) {
3640 			type = DDI_INTR_TYPE_MSIX;
3641 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3642 			    "==> hxge_add_intrs: "
3643 			    "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3644 		} else if (intr_types & DDI_INTR_TYPE_MSI) {
3645 			type = DDI_INTR_TYPE_MSI;
3646 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3647 			    "==> hxge_add_intrs: "
3648 			    "ddi_intr_get_supported_types: MSI 0x%08x", type));
3649 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
3650 			type = DDI_INTR_TYPE_FIXED;
3651 			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3652 			    "ddi_intr_get_supported_types: FIXED 0x%08x", type));
3653 		}
3654 		break;
3655 
3656 	case 1:
3657 		if (intr_types & DDI_INTR_TYPE_MSI) {
3658 			type = DDI_INTR_TYPE_MSI;
3659 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3660 			    "==> hxge_add_intrs: "
3661 			    "ddi_intr_get_supported_types: MSI 0x%08x", type));
3662 		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
3663 			type = DDI_INTR_TYPE_MSIX;
3664 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3665 			    "==> hxge_add_intrs: "
3666 			    "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3667 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
3668 			type = DDI_INTR_TYPE_FIXED;
3669 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3670 			    "==> hxge_add_intrs: "
3671 			    "ddi_intr_get_supported_types: FIXED 0x%08x", type));
3672 		}
3673 	}
3674 
3675 	hxgep->hxge_intr_type.intr_type = type;
3676 	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
3677 	    type == DDI_INTR_TYPE_FIXED) &&
3678 	    hxgep->hxge_intr_type.niu_msi_enable) {
3679 		if ((status = hxge_add_intrs_adv(hxgep)) != DDI_SUCCESS) {
3680 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3681 			    " hxge_add_intrs: "
3682 			    " hxge_add_intrs_adv failed: status 0x%08x",
3683 			    status));
3684 			return (status);
3685 		} else {
3686 			HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_intrs: "
3687 			    "interrupts registered : type %d", type));
3688 			hxgep->hxge_intr_type.intr_registered = B_TRUE;
3689 
3690 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
3691 			    "\nAdded advanced hxge add_intr_adv "
3692 			    "intr type 0x%x\n", type));
3693 
3694 			return (status);
3695 		}
3696 	}
3697 
3698 	if (!hxgep->hxge_intr_type.intr_registered) {
3699 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3700 		    "==> hxge_add_intrs: failed to register interrupts"));
3701 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3702 	}
3703 
3704 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs"));
3705 
3706 	return (status);
3707 }
3708 
3709 /*ARGSUSED*/
3710 static hxge_status_t
3711 hxge_add_intrs_adv(p_hxge_t hxgep)
3712 {
3713 	int		intr_type;
3714 	p_hxge_intr_t	intrp;
3715 	hxge_status_t	status;
3716 
3717 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv"));
3718 
3719 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3720 	intr_type = intrp->intr_type;
3721 
3722 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv: type 0x%x",
3723 	    intr_type));
3724 
3725 	switch (intr_type) {
3726 	case DDI_INTR_TYPE_MSI:		/* 0x2 */
3727 	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
3728 		status = hxge_add_intrs_adv_type(hxgep, intr_type);
3729 		break;
3730 
3731 	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
3732 		status = hxge_add_intrs_adv_type_fix(hxgep, intr_type);
3733 		break;
3734 
3735 	default:
3736 		status = HXGE_ERROR;
3737 		break;
3738 	}
3739 
3740 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv"));
3741 
3742 	return (status);
3743 }
3744 
3745 /*ARGSUSED*/
3746 static hxge_status_t
3747 hxge_add_intrs_adv_type(p_hxge_t hxgep, uint32_t int_type)
3748 {
3749 	dev_info_t	*dip = hxgep->dip;
3750 	p_hxge_ldg_t	ldgp;
3751 	p_hxge_intr_t	intrp;
3752 	uint_t		*inthandler;
3753 	void		*arg1, *arg2;
3754 	int		behavior;
3755 	int		nintrs, navail;
3756 	int		nactual, nrequired, nrequest;
3757 	int		inum = 0;
3758 	int		loop = 0;
3759 	int		x, y;
3760 	int		ddi_status = DDI_SUCCESS;
3761 	hxge_status_t	status = HXGE_OK;
3762 
3763 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type"));
3764 
3765 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3766 
3767 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3768 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3769 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3770 		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
3771 		    "nintrs: %d", ddi_status, nintrs));
3772 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3773 	}
3774 
3775 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
3776 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
3777 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3778 		    "ddi_intr_get_navail() failed, status: 0x%x, "
3779 		    "navail: %d", ddi_status, navail));
3780 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3781 	}
3782 
3783 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3784 	    "ddi_intr_get_navail() returned: intr type %d nintrs %d, navail %d",
3785 	    int_type, nintrs, navail));
3786 
3787 	/* PSARC/2007/453 MSI-X interrupt limit override */
3788 	if (int_type == DDI_INTR_TYPE_MSIX) {
3789 		nrequest = hxge_create_msi_property(hxgep);
3790 		if (nrequest < navail) {
3791 			navail = nrequest;
3792 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3793 			    "hxge_add_intrs_adv_type: nintrs %d "
3794 			    "navail %d (nrequest %d)",
3795 			    nintrs, navail, nrequest));
3796 		}
3797 	}
3798 
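	/*
	 * MSI allocations must use a power-of-2 vector count.  The block
	 * below rounds a non-power-of-2 navail down to the largest of 16,
	 * 8, 4, 2 or 1 whose bit is set (for example, navail == 6 becomes
	 * 4).  This appears to assume navail is below 32.
	 */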
3799 	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
3800 		/* MSI must be power of 2 */
3801 		if ((navail & 16) == 16) {
3802 			navail = 16;
3803 		} else if ((navail & 8) == 8) {
3804 			navail = 8;
3805 		} else if ((navail & 4) == 4) {
3806 			navail = 4;
3807 		} else if ((navail & 2) == 2) {
3808 			navail = 2;
3809 		} else {
3810 			navail = 1;
3811 		}
3812 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3813 		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
3814 		    "navail %d", nintrs, navail));
3815 	}
3816 
3817 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3818 	    "requesting: intr type %d nintrs %d, navail %d",
3819 	    int_type, nintrs, navail));
3820 
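	/*
	 * DDI_INTR_ALLOC_STRICT makes ddi_intr_alloc() fail unless the
	 * full request can be satisfied; DDI_INTR_ALLOC_NORMAL accepts
	 * fewer vectors than requested.  Fixed interrupts are allocated
	 * strictly, MSI/MSI-X opportunistically.
	 */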
3821 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
3822 	    DDI_INTR_ALLOC_NORMAL);
3823 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
3824 	intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP);
3825 
3826 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
3827 	    navail, &nactual, behavior);
3828 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
3829 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3830 		    " ddi_intr_alloc() failed: %d", ddi_status));
3831 		kmem_free(intrp->htable, intrp->intr_size);
3832 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3833 	}
3834 
3835 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3836 	    "ddi_intr_alloc() returned: navail %d nactual %d",
3837 	    navail, nactual));
3838 
3839 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
3840 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
3841 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3842 		    " ddi_intr_get_pri() failed: %d", ddi_status));
3843 		/* Free already allocated interrupts */
3844 		for (y = 0; y < nactual; y++) {
3845 			(void) ddi_intr_free(intrp->htable[y]);
3846 		}
3847 
3848 		kmem_free(intrp->htable, intrp->intr_size);
3849 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3850 	}
3851 
3852 	nrequired = 0;
3853 	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
3854 	if (status != HXGE_OK) {
3855 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3856 		    "hxge_add_intrs_adv_type: hxge_ldgv_init "
3857 		    "failed: 0x%x", status));
3858 		/* Free already allocated interrupts */
3859 		for (y = 0; y < nactual; y++) {
3860 			(void) ddi_intr_free(intrp->htable[y]);
3861 		}
3862 
3863 		kmem_free(intrp->htable, intrp->intr_size);
3864 		return (status);
3865 	}
3866 
3867 	ldgp = hxgep->ldgvp->ldgp;
3868 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3869 	    "After hxge_ldgv_init(): nreq %d nactual %d", nrequired, nactual));
3870 
3871 	if (nactual < nrequired)
3872 		loop = nactual;
3873 	else
3874 		loop = nrequired;
3875 
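	/*
	 * Bind one handler per logical device group (ldg): a group with a
	 * single logical device uses that device's own handler, a group
	 * servicing several devices uses the shared system handler.  Each
	 * group is assigned the next interrupt vector in turn.
	 */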
3876 	for (x = 0; x < loop; x++, ldgp++) {
3877 		ldgp->vector = (uint8_t)x;
3878 		arg1 = ldgp->ldvp;
3879 		arg2 = hxgep;
3880 		if (ldgp->nldvs == 1) {
3881 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
3882 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3883 			    "hxge_add_intrs_adv_type: arg1 $%p arg2 $%p: "
3884 			    "1-1 int handler (entry %d)\n",
3885 			    arg1, arg2, x));
3886 		} else if (ldgp->nldvs > 1) {
3887 			inthandler = (uint_t *)ldgp->sys_intr_handler;
3888 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3889 			    "hxge_add_intrs_adv_type: arg1 $%p arg2 $%p: "
3890 			    "nldvs %d int handler (entry %d)\n",
3891 			    arg1, arg2, ldgp->nldvs, x));
3892 		}
3893 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3894 		    "==> hxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
3895 		    "htable 0x%llx", x, intrp->htable[x]));
3896 
3897 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
3898 		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
3899 		    DDI_SUCCESS) {
3900 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3901 			    "==> hxge_add_intrs_adv_type: failed #%d "
3902 			    "status 0x%x", x, ddi_status));
3903 			for (y = 0; y < intrp->intr_added; y++) {
3904 				(void) ddi_intr_remove_handler(
3905 				    intrp->htable[y]);
3906 			}
3907 
3908 			/* Free already allocated intr */
3909 			for (y = 0; y < nactual; y++) {
3910 				(void) ddi_intr_free(intrp->htable[y]);
3911 			}
3912 			kmem_free(intrp->htable, intrp->intr_size);
3913 
3914 			(void) hxge_ldgv_uninit(hxgep);
3915 
3916 			return (HXGE_ERROR | HXGE_DDI_FAILED);
3917 		}
3918 
3919 		intrp->intr_added++;
3920 	}
3921 	intrp->msi_intx_cnt = nactual;
3922 
3923 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3924 	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
3925 	    navail, nactual, intrp->msi_intx_cnt, intrp->intr_added));
3926 
3927 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
3928 	(void) hxge_intr_ldgv_init(hxgep);
3929 
3930 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type"));
3931 
3932 	return (status);
3933 }
3934 
3935 /*ARGSUSED*/
3936 static hxge_status_t
3937 hxge_add_intrs_adv_type_fix(p_hxge_t hxgep, uint32_t int_type)
3938 {
3939 	dev_info_t	*dip = hxgep->dip;
3940 	p_hxge_ldg_t	ldgp;
3941 	p_hxge_intr_t	intrp;
3942 	uint_t		*inthandler;
3943 	void		*arg1, *arg2;
3944 	int		behavior;
3945 	int		nintrs, navail;
3946 	int		nactual, nrequired;
3947 	int		inum = 0;
3948 	int		x, y;
3949 	int		ddi_status = DDI_SUCCESS;
3950 	hxge_status_t	status = HXGE_OK;
3951 
3952 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type_fix"));
3953 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3954 
3955 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3956 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3957 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3958 		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
3959 		    "nintrs: %d", ddi_status, nintrs));
3960 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3961 	}
3962 
3963 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
3964 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
3965 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3966 		    "ddi_intr_get_navail() failed, status: 0x%x, "
3967 		    "navail: %d", ddi_status, navail));
3968 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3969 	}
3970 
3971 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3972 	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
3973 	    nintrs, navail));
3974 
3975 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
3976 	    DDI_INTR_ALLOC_NORMAL);
3977 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
3978 	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
3979 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
3980 	    navail, &nactual, behavior);
3981 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
3982 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3983 		    " ddi_intr_alloc() failed: %d", ddi_status));
3984 		kmem_free(intrp->htable, intrp->intr_size);
3985 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3986 	}
3987 
3988 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
3989 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
3990 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3991 		    " ddi_intr_get_pri() failed: %d", ddi_status));
3992 		/* Free already allocated interrupts */
3993 		for (y = 0; y < nactual; y++) {
3994 			(void) ddi_intr_free(intrp->htable[y]);
3995 		}
3996 
3997 		kmem_free(intrp->htable, intrp->intr_size);
3998 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3999 	}
4000 
4001 	nrequired = 0;
4002 	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
4003 	if (status != HXGE_OK) {
4004 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4005 		    "hxge_add_intrs_adv_type_fix:hxge_ldgv_init "
4006 		    "failed: 0x%x", status));
4007 		/* Free already allocated interrupts */
4008 		for (y = 0; y < nactual; y++) {
4009 			(void) ddi_intr_free(intrp->htable[y]);
4010 		}
4011 
4012 		kmem_free(intrp->htable, intrp->intr_size);
4013 		return (status);
4014 	}
4015 
4016 	ldgp = hxgep->ldgvp->ldgp;
4017 	for (x = 0; x < nrequired; x++, ldgp++) {
4018 		ldgp->vector = (uint8_t)x;
4019 		arg1 = ldgp->ldvp;
4020 		arg2 = hxgep;
4021 		if (ldgp->nldvs == 1) {
4022 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
4023 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
4024 			    "hxge_add_intrs_adv_type_fix: "
4025 			    "1-1 int handler(%d) ldg %d ldv %d "
4026 			    "arg1 $%p arg2 $%p\n",
4027 			    x, ldgp->ldg, ldgp->ldvp->ldv, arg1, arg2));
4028 		} else if (ldgp->nldvs > 1) {
4029 			inthandler = (uint_t *)ldgp->sys_intr_handler;
4030 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
4031 			    "hxge_add_intrs_adv_type_fix: "
4032 			    "shared int handler(%d) nldvs %d ldg %d ldv %d "
4033 			    "arg1 0x%016llx arg2 0x%016llx\n",
4034 			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
4035 			    arg1, arg2));
4036 		}
4037 
4038 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
4039 		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
4040 		    DDI_SUCCESS) {
4041 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4042 			    "==> hxge_add_intrs_adv_type_fix: failed #%d "
4043 			    "status 0x%x", x, ddi_status));
4044 			for (y = 0; y < intrp->intr_added; y++) {
4045 				(void) ddi_intr_remove_handler(
4046 				    intrp->htable[y]);
4047 			}
4048 			for (y = 0; y < nactual; y++) {
4049 				(void) ddi_intr_free(intrp->htable[y]);
4050 			}
4051 			/* Free already allocated intr */
4052 			kmem_free(intrp->htable, intrp->intr_size);
4053 
4054 			(void) hxge_ldgv_uninit(hxgep);
4055 
4056 			return (HXGE_ERROR | HXGE_DDI_FAILED);
4057 		}
4058 		intrp->intr_added++;
4059 	}
4060 
4061 	intrp->msi_intx_cnt = nactual;
4062 
4063 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
4064 
4065 	status = hxge_intr_ldgv_init(hxgep);
4066 
4067 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type_fix"));
4068 
4069 	return (status);
4070 }
4071 
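/*
 * Tear down interrupts in the reverse order of their setup: disable the
 * vectors (block disable when supported), remove the handlers, free the
 * DDI interrupt handles and the handle table, then release the logical
 * device group state.
 */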
4072 /*ARGSUSED*/
4073 static void
4074 hxge_remove_intrs(p_hxge_t hxgep)
4075 {
4076 	int		i, inum;
4077 	p_hxge_intr_t	intrp;
4078 
4079 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs"));
4080 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4081 	if (!intrp->intr_registered) {
4082 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
4083 		    "<== hxge_remove_intrs: interrupts not registered"));
4084 		return;
4085 	}
4086 
4087 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs:advanced"));
4088 
4089 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4090 		(void) ddi_intr_block_disable(intrp->htable,
4091 		    intrp->intr_added);
4092 	} else {
4093 		for (i = 0; i < intrp->intr_added; i++) {
4094 			(void) ddi_intr_disable(intrp->htable[i]);
4095 		}
4096 	}
4097 
4098 	for (inum = 0; inum < intrp->intr_added; inum++) {
4099 		if (intrp->htable[inum]) {
4100 			(void) ddi_intr_remove_handler(intrp->htable[inum]);
4101 		}
4102 	}
4103 
4104 	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
4105 		if (intrp->htable[inum]) {
4106 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
4107 			    "hxge_remove_intrs: ddi_intr_free inum %d "
4108 			    "msi_intx_cnt %d intr_added %d",
4109 			    inum, intrp->msi_intx_cnt, intrp->intr_added));
4110 
4111 			(void) ddi_intr_free(intrp->htable[inum]);
4112 		}
4113 	}
4114 
4115 	kmem_free(intrp->htable, intrp->intr_size);
4116 	intrp->intr_registered = B_FALSE;
4117 	intrp->intr_enabled = B_FALSE;
4118 	intrp->msi_intx_cnt = 0;
4119 	intrp->intr_added = 0;
4120 
4121 	(void) hxge_ldgv_uninit(hxgep);
4122 
4123 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_intrs"));
4124 }
4125 
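/*
 * Enable or disable all registered vectors.  When the DDI_INTR_FLAG_BLOCK
 * capability is present, the whole handle table is toggled with
 * ddi_intr_block_enable()/ddi_intr_block_disable(); otherwise each vector
 * is enabled or disabled individually.
 */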
4126 /*ARGSUSED*/
4127 static void
4128 hxge_intrs_enable(p_hxge_t hxgep)
4129 {
4130 	p_hxge_intr_t	intrp;
4131 	int		i;
4132 	int		status;
4133 
4134 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable"));
4135 
4136 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4137 
4138 	if (!intrp->intr_registered) {
4139 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_intrs_enable: "
4140 		    "interrupts are not registered"));
4141 		return;
4142 	}
4143 
4144 	if (intrp->intr_enabled) {
4145 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
4146 		    "<== hxge_intrs_enable: already enabled"));
4147 		return;
4148 	}
4149 
4150 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4151 		status = ddi_intr_block_enable(intrp->htable,
4152 		    intrp->intr_added);
4153 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
4154 		    "block enable - status 0x%x total inums #%d\n",
4155 		    status, intrp->intr_added));
4156 	} else {
4157 		for (i = 0; i < intrp->intr_added; i++) {
4158 			status = ddi_intr_enable(intrp->htable[i]);
4159 			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
4160 			    "ddi_intr_enable:enable - status 0x%x "
4161 			    "total inums %d enable inum #%d\n",
4162 			    status, intrp->intr_added, i));
4163 			if (status == DDI_SUCCESS) {
4164 				intrp->intr_enabled = B_TRUE;
4165 			}
4166 		}
4167 	}
4168 
4169 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_enable"));
4170 }
4171 
4172 /*ARGSUSED*/
4173 static void
4174 hxge_intrs_disable(p_hxge_t hxgep)
4175 {
4176 	p_hxge_intr_t	intrp;
4177 	int		i;
4178 
4179 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_disable"));
4180 
4181 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4182 
4183 	if (!intrp->intr_registered) {
4184 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable: "
4185 		    "interrupts are not registered"));
4186 		return;
4187 	}
4188 
4189 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4190 		(void) ddi_intr_block_disable(intrp->htable,
4191 		    intrp->intr_added);
4192 	} else {
4193 		for (i = 0; i < intrp->intr_added; i++) {
4194 			(void) ddi_intr_disable(intrp->htable[i]);
4195 		}
4196 	}
4197 
4198 	intrp->intr_enabled = B_FALSE;
4199 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable"));
4200 }
4201 
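/*
 * Register this instance with the MAC framework.  The maximum SDU
 * advertised is the configured maximum frame size less the link-layer
 * overhead represented by MTU_TO_FRAME_SIZE.
 */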
4202 static hxge_status_t
4203 hxge_mac_register(p_hxge_t hxgep)
4204 {
4205 	mac_register_t	*macp;
4206 	int		status;
4207 
4208 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_mac_register"));
4209 
4210 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
4211 		return (HXGE_ERROR);
4212 
4213 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
4214 	macp->m_driver = hxgep;
4215 	macp->m_dip = hxgep->dip;
4216 	macp->m_src_addr = hxgep->ouraddr.ether_addr_octet;
4217 	macp->m_callbacks = &hxge_m_callbacks;
4218 	macp->m_min_sdu = 0;
4219 	macp->m_max_sdu = hxgep->vmac.maxframesize - MTU_TO_FRAME_SIZE;
4220 	macp->m_margin = VLAN_TAGSZ;
4221 	macp->m_priv_props = hxge_priv_props;
4222 	macp->m_priv_prop_count = HXGE_MAX_PRIV_PROPS;
4223 	macp->m_v12n = MAC_VIRT_LEVEL1;
4224 
4225 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
4226 	    "hxge_mac_register: ether addr is %x:%x:%x:%x:%x:%x",
4227 	    macp->m_src_addr[0],
4228 	    macp->m_src_addr[1],
4229 	    macp->m_src_addr[2],
4230 	    macp->m_src_addr[3],
4231 	    macp->m_src_addr[4],
4232 	    macp->m_src_addr[5]));
4233 
4234 	status = mac_register(macp, &hxgep->mach);
4235 	mac_free(macp);
4236 
4237 	if (status != 0) {
4238 		cmn_err(CE_WARN,
4239 		    "hxge_mac_register failed (status %d instance %d)",
4240 		    status, hxgep->instance);
4241 		return (HXGE_ERROR);
4242 	}
4243 
4244 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_mac_register success "
4245 	    "(instance %d)", hxgep->instance));
4246 
4247 	return (HXGE_OK);
4248 }
4249 
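/*
 * hxge_init_common_dev()/hxge_uninit_common_dev() maintain hxge_hw_list,
 * per-Hydra hardware state keyed by the parent devinfo node and
 * reference-counted through ndevs, so that state shared by instances on
 * the same device is created once and freed when the last instance
 * detaches.
 */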
4250 static int
4251 hxge_init_common_dev(p_hxge_t hxgep)
4252 {
4253 	p_hxge_hw_list_t	hw_p;
4254 	dev_info_t		*p_dip;
4255 
4256 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_init_common_dev"));
4257 
4258 	p_dip = hxgep->p_dip;
4259 	MUTEX_ENTER(&hxge_common_lock);
4260 
4261 	/*
4262 	 * Loop through existing per Hydra hardware list.
4263 	 */
4264 	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
4265 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4266 		    "==> hxge_init_common_dev: hw_p $%p parent dip $%p",
4267 		    hw_p, p_dip));
4268 		if (hw_p->parent_devp == p_dip) {
4269 			hxgep->hxge_hw_p = hw_p;
4270 			hw_p->ndevs++;
4271 			hw_p->hxge_p = hxgep;
4272 			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4273 			    "==> hxge_init_common_dev: "
4274 			    "hw_p $%p parent dip $%p ndevs %d (found)",
4275 			    hw_p, p_dip, hw_p->ndevs));
4276 			break;
4277 		}
4278 	}
4279 
4280 	if (hw_p == NULL) {
4281 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4282 		    "==> hxge_init_common_dev: parent dip $%p (new)", p_dip));
4283 		hw_p = kmem_zalloc(sizeof (hxge_hw_list_t), KM_SLEEP);
4284 		hw_p->parent_devp = p_dip;
4285 		hw_p->magic = HXGE_MAGIC;
4286 		hxgep->hxge_hw_p = hw_p;
4287 		hw_p->ndevs++;
4288 		hw_p->hxge_p = hxgep;
4289 		hw_p->next = hxge_hw_list;
4290 
4291 		MUTEX_INIT(&hw_p->hxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
4292 		MUTEX_INIT(&hw_p->hxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
4293 		MUTEX_INIT(&hw_p->hxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
4294 
4295 		hxge_hw_list = hw_p;
4296 	}
4297 	MUTEX_EXIT(&hxge_common_lock);
4298 	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4299 	    "==> hxge_init_common_dev (hxge_hw_list) $%p", hxge_hw_list));
4300 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_init_common_dev"));
4301 
4302 	return (HXGE_OK);
4303 }
4304 
4305 static void
4306 hxge_uninit_common_dev(p_hxge_t hxgep)
4307 {
4308 	p_hxge_hw_list_t	hw_p, h_hw_p;
4309 	dev_info_t		*p_dip;
4310 
4311 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_uninit_common_dev"));
4312 	if (hxgep->hxge_hw_p == NULL) {
4313 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4314 		    "<== hxge_uninit_common_dev (no common)"));
4315 		return;
4316 	}
4317 
4318 	MUTEX_ENTER(&hxge_common_lock);
4319 	h_hw_p = hxge_hw_list;
4320 	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
4321 		p_dip = hw_p->parent_devp;
4322 		if (hxgep->hxge_hw_p == hw_p && p_dip == hxgep->p_dip &&
4323 		    hxgep->hxge_hw_p->magic == HXGE_MAGIC &&
4324 		    hw_p->magic == HXGE_MAGIC) {
4325 			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4326 			    "==> hxge_uninit_common_dev: "
4327 			    "hw_p $%p parent dip $%p ndevs %d (found)",
4328 			    hw_p, p_dip, hw_p->ndevs));
4329 
4330 			hxgep->hxge_hw_p = NULL;
4331 			if (hw_p->ndevs) {
4332 				hw_p->ndevs--;
4333 			}
4334 			hw_p->hxge_p = NULL;
4335 			if (!hw_p->ndevs) {
4336 				MUTEX_DESTROY(&hw_p->hxge_vlan_lock);
4337 				MUTEX_DESTROY(&hw_p->hxge_tcam_lock);
4338 				MUTEX_DESTROY(&hw_p->hxge_cfg_lock);
4339 				HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4340 				    "==> hxge_uninit_common_dev: "
4341 				    "hw_p $%p parent dip $%p ndevs %d (last)",
4342 				    hw_p, p_dip, hw_p->ndevs));
4343 
4344 				if (hw_p == hxge_hw_list) {
4345 					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4346 					    "==> hxge_uninit_common_dev:"
4347 					    "remove head "
4348 					    "hw_p $%p parent dip $%p "
4349 					    "ndevs %d (head)",
4350 					    hw_p, p_dip, hw_p->ndevs));
4351 					hxge_hw_list = hw_p->next;
4352 				} else {
4353 					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4354 					    "==> hxge_uninit_common_dev:"
4355 					    "remove middle "
4356 					    "hw_p $%p parent dip $%p "
4357 					    "ndevs %d (middle)",
4358 					    hw_p, p_dip, hw_p->ndevs));
4359 					h_hw_p->next = hw_p->next;
4360 				}
4361 
4362 				KMEM_FREE(hw_p, sizeof (hxge_hw_list_t));
4363 			}
4364 			break;
4365 		} else {
4366 			h_hw_p = hw_p;
4367 		}
4368 	}
4369 
4370 	MUTEX_EXIT(&hxge_common_lock);
4371 	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4372 	    "==> hxge_uninit_common_dev (hxge_hw_list) $%p", hxge_hw_list));
4373 
4374 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_uninit_common_dev"));
4375 }
4376 
4377 #define	HXGE_MSIX_ENTRIES		32
4378 #define	HXGE_MSIX_WAIT_COUNT		10
4379 #define	HXGE_MSIX_PARITY_CHECK_COUNT	30
4380 
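/*
 * Link state is polled rather than interrupt driven: hxge_link_poll()
 * reads CIP_LINK_STAT, reports any change through hxge_link_update() and
 * mac_link_update(), then re-arms itself with timeout(9F).
 */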
4381 static void
4382 hxge_link_poll(void *arg)
4383 {
4384 	p_hxge_t		hxgep = (p_hxge_t)arg;
4385 	hpi_handle_t		handle;
4386 	cip_link_stat_t		link_stat;
4387 	hxge_timeout		*to = &hxgep->timeout;
4388 
4389 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
4390 	HXGE_REG_RD32(handle, CIP_LINK_STAT, &link_stat.value);
4391 
4392 	if (to->report_link_status ||
4393 	    (to->link_status != link_stat.bits.xpcs0_link_up)) {
4394 		to->link_status = link_stat.bits.xpcs0_link_up;
4395 		to->report_link_status = B_FALSE;
4396 
4397 		if (link_stat.bits.xpcs0_link_up) {
4398 			hxge_link_update(hxgep, LINK_STATE_UP);
4399 		} else {
4400 			hxge_link_update(hxgep, LINK_STATE_DOWN);
4401 		}
4402 	}
4403 
4404 	/* Restart the link status timer to check the link status */
4405 	MUTEX_ENTER(&to->lock);
4406 	to->id = timeout(hxge_link_poll, arg, to->ticks);
4407 	MUTEX_EXIT(&to->lock);
4408 }
4409 
4410 static void
4411 hxge_link_update(p_hxge_t hxgep, link_state_t state)
4412 {
4413 	p_hxge_stats_t		statsp = (p_hxge_stats_t)hxgep->statsp;
4414 
4415 	mac_link_update(hxgep->mach, state);
4416 	if (state == LINK_STATE_UP) {
4417 		statsp->mac_stats.link_speed = 10000;
4418 		statsp->mac_stats.link_duplex = 2;
4419 		statsp->mac_stats.link_up = 1;
4420 	} else {
4421 		statsp->mac_stats.link_speed = 0;
4422 		statsp->mac_stats.link_duplex = 0;
4423 		statsp->mac_stats.link_up = 0;
4424 	}
4425 }
4426 
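/*
 * Each MSI-X table entry is 16 bytes (message address low/high, message
 * data, vector control), hence the i * 16 offsets below.  The loop writes
 * a distinct pattern into every entry and reads it back, which reads as a
 * way of initializing the table RAM through the MSI-X BAR; this is an
 * inference from the code, not a documented hardware requirement.
 */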
4427 static void
4428 hxge_msix_init(p_hxge_t hxgep)
4429 {
4430 	uint32_t 		data0;
4431 	uint32_t 		data1;
4432 	uint32_t 		data2;
4433 	int			i;
4434 	uint32_t		msix_entry0;
4435 	uint32_t		msix_entry1;
4436 	uint32_t		msix_entry2;
4437 	uint32_t		msix_entry3;
4438 
4439 	/* Use the MSI-X BAR directly instead of indirect access. */
4440 	for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
4441 		data0 = 0xffffffff - i;
4442 		data1 = 0xffffffff - i - 1;
4443 		data2 = 0xffffffff - i - 2;
4444 
4445 		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16, data0);
4446 		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 4, data1);
4447 		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 8, data2);
4448 		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 12, 0);
4449 	}
4450 
4451 	/* Initialize ram data out buffer. */
4452 	for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
4453 		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16, &msix_entry0);
4454 		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 4, &msix_entry1);
4455 		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 8, &msix_entry2);
4456 		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 12, &msix_entry3);
4457 	}
4458 }
4459 
4460 /*
4461  * The following function is to support
4462  * PSARC/2007/453 MSI-X interrupt limit override.
4463  */
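/*
 * For example (assuming HXGE_MSIX_REQUEST_10G is 8, per the comment in
 * the function body): a 4-CPU system requests 4 MSI-X vectors, while a
 * 16-CPU system requests 8.
 */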
4464 static int
4465 hxge_create_msi_property(p_hxge_t hxgep)
4466 {
4467 	int	nmsi;
4468 	extern	int ncpus;
4469 
4470 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==>hxge_create_msi_property"));
4471 
4472 	(void) ddi_prop_create(DDI_DEV_T_NONE, hxgep->dip,
4473 	    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
4474 	/*
4475 	 * The maximum number of MSI-X vectors requested is 8.
4476 	 * If the system has fewer than 8 CPUs, request one
4477 	 * MSI-X vector per CPU instead.
4478 	 */
4479 	if (ncpus >= HXGE_MSIX_REQUEST_10G) {
4480 		nmsi = HXGE_MSIX_REQUEST_10G;
4481 	} else {
4482 		nmsi = ncpus;
4483 	}
4484 
4485 	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4486 	    "==>hxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
4487 	    ddi_prop_exists(DDI_DEV_T_NONE, hxgep->dip,
4488 	    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
4489 
4490 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<==hxge_create_msi_property"));
4491 	return (nmsi);
4492 }
4493