xref: /titanic_52/usr/src/uts/common/io/hxge/hxge_main.c (revision fcf3ce441efd61da9bb2884968af01cb7c1452cc)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * SunOS MT STREAMS Hydra 10Gb Ethernet Device Driver.
28  */
29 #include <hxge_impl.h>
30 #include <hxge_pfc.h>
31 
32 /*
33  * PSARC/2007/453 MSI-X interrupt limit override
34  * (This PSARC case is limited to MSI-X vectors
35  *  and SPARC platforms only).
36  */
37 #if defined(_BIG_ENDIAN)
38 uint32_t hxge_msi_enable = 2;
39 #else
40 uint32_t hxge_msi_enable = 1;
41 #endif
42 
43 /*
44  * Globals: tunable parameters (/etc/system or adb)
45  *
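 * For example, in /etc/system (illustrative values only; the defaults
 * are set just below):
 *	set hxge:hxge_rbr_size = 2048
 *	set hxge:hxge_rcr_size = 8192
 *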
46  */
47 uint32_t hxge_rbr_size = HXGE_RBR_RBB_DEFAULT;
48 uint32_t hxge_rbr_spare_size = 0;
49 uint32_t hxge_rcr_size = HXGE_RCR_DEFAULT;
50 uint32_t hxge_tx_ring_size = HXGE_TX_RING_DEFAULT;
51 uint32_t hxge_bcopy_thresh = TX_BCOPY_MAX;
52 uint32_t hxge_dvma_thresh = TX_FASTDVMA_MIN;
53 uint32_t hxge_dma_stream_thresh = TX_STREAM_MIN;
54 uint32_t hxge_jumbo_frame_size = MAX_FRAME_SIZE;
55 
56 static hxge_os_mutex_t hxgedebuglock;
57 static int hxge_debug_init = 0;
58 
59 /*
60  * Debugging flags:
61  *		hxge_no_tx_lb: nonzero disables transmit load balancing
62  *		hxge_tx_lb_policy: 0 - TCP/UDP port (default)
63  *				   1 - From the Stack
64  *				   2 - Destination IP Address
65  */
66 uint32_t hxge_no_tx_lb = 0;
67 uint32_t hxge_tx_lb_policy = HXGE_TX_LB_TCPUDP;
68 
69 /*
70  * Add tunable to reduce the amount of time spent in the
71  * ISR doing Rx Processing.
72  */
73 uint32_t hxge_max_rx_pkts = 1024;
74 
75 /*
76  * Tunables to manage the receive buffer blocks.
77  *
78  * hxge_rx_threshold_hi: copy all buffers.
79  * hxge_rx_buf_size_type: receive buffer block size type.
80  * hxge_rx_threshold_lo: copy only up to tunable block size type.
81  */
82 hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_6;
83 hxge_rxbuf_type_t hxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
84 hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_3;
85 
86 rtrace_t hpi_rtracebuf;
87 
88 /*
89  * Function Prototypes
90  */
91 static int hxge_attach(dev_info_t *, ddi_attach_cmd_t);
92 static int hxge_detach(dev_info_t *, ddi_detach_cmd_t);
93 static void hxge_unattach(p_hxge_t);
94 
95 static hxge_status_t hxge_setup_system_dma_pages(p_hxge_t);
96 
97 static hxge_status_t hxge_setup_mutexes(p_hxge_t);
98 static void hxge_destroy_mutexes(p_hxge_t);
99 
100 static hxge_status_t hxge_map_regs(p_hxge_t hxgep);
101 static void hxge_unmap_regs(p_hxge_t hxgep);
102 
103 hxge_status_t hxge_add_intrs(p_hxge_t hxgep);
104 static hxge_status_t hxge_add_soft_intrs(p_hxge_t hxgep);
105 static void hxge_remove_intrs(p_hxge_t hxgep);
106 static void hxge_remove_soft_intrs(p_hxge_t hxgep);
107 static hxge_status_t hxge_add_intrs_adv(p_hxge_t hxgep);
108 static hxge_status_t hxge_add_intrs_adv_type(p_hxge_t, uint32_t);
109 static hxge_status_t hxge_add_intrs_adv_type_fix(p_hxge_t, uint32_t);
110 void hxge_intrs_enable(p_hxge_t hxgep);
111 static void hxge_intrs_disable(p_hxge_t hxgep);
112 static void hxge_suspend(p_hxge_t);
113 static hxge_status_t hxge_resume(p_hxge_t);
114 hxge_status_t hxge_setup_dev(p_hxge_t);
115 static void hxge_destroy_dev(p_hxge_t);
116 hxge_status_t hxge_alloc_mem_pool(p_hxge_t);
117 static void hxge_free_mem_pool(p_hxge_t);
118 static hxge_status_t hxge_alloc_rx_mem_pool(p_hxge_t);
119 static void hxge_free_rx_mem_pool(p_hxge_t);
120 static hxge_status_t hxge_alloc_tx_mem_pool(p_hxge_t);
121 static void hxge_free_tx_mem_pool(p_hxge_t);
122 static hxge_status_t hxge_dma_mem_alloc(p_hxge_t, dma_method_t,
123     struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
124     p_hxge_dma_common_t);
125 static void hxge_dma_mem_free(p_hxge_dma_common_t);
126 static hxge_status_t hxge_alloc_rx_buf_dma(p_hxge_t, uint16_t,
127     p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
128 static void hxge_free_rx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
129 static hxge_status_t hxge_alloc_rx_cntl_dma(p_hxge_t, uint16_t,
130     p_hxge_dma_common_t *, struct ddi_dma_attr *, size_t);
131 static void hxge_free_rx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
132 static hxge_status_t hxge_alloc_tx_buf_dma(p_hxge_t, uint16_t,
133     p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
134 static void hxge_free_tx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
135 static hxge_status_t hxge_alloc_tx_cntl_dma(p_hxge_t, uint16_t,
136     p_hxge_dma_common_t *, size_t);
137 static void hxge_free_tx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
138 static int hxge_init_common_dev(p_hxge_t);
139 static void hxge_uninit_common_dev(p_hxge_t);
140 
141 /*
142  * The next declarations are for the GLDv3 interface.
143  */
144 static int hxge_m_start(void *);
145 static void hxge_m_stop(void *);
146 static int hxge_m_unicst(void *, const uint8_t *);
147 static int hxge_m_multicst(void *, boolean_t, const uint8_t *);
148 static int hxge_m_promisc(void *, boolean_t);
149 static void hxge_m_ioctl(void *, queue_t *, mblk_t *);
150 static void hxge_m_resources(void *);
151 static hxge_status_t hxge_mac_register(p_hxge_t hxgep);
152 
153 static int hxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
154 static int hxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);
155 static int hxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
156 static int hxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);
157 static boolean_t hxge_m_getcapab(void *, mac_capab_t, void *);
158 static boolean_t hxge_param_locked(mac_prop_id_t pr_num);
159 static int hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
160     uint_t pr_valsize, const void *pr_val);
161 static int hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
162     uint_t pr_flags, uint_t pr_valsize, void *pr_val);
163 static int hxge_get_def_val(hxge_t *hxgep, mac_prop_id_t pr_num,
164     uint_t pr_valsize, void *pr_val);
165 static int hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name,
166     uint_t pr_valsize, const void *pr_val);
167 static int hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name,
168     uint_t pr_flags, uint_t pr_valsize, void *pr_val);
169 static void hxge_link_poll(void *arg);
170 
171 mac_priv_prop_t hxge_priv_props[] = {
172 	{"_rxdma_intr_time", MAC_PROP_PERM_RW},
173 	{"_rxdma_intr_pkts", MAC_PROP_PERM_RW},
174 	{"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW},
175 	{"_class_opt_ipv4_udp", MAC_PROP_PERM_RW},
176 	{"_class_opt_ipv4_ah", MAC_PROP_PERM_RW},
177 	{"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW},
178 	{"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW},
179 	{"_class_opt_ipv6_udp", MAC_PROP_PERM_RW},
180 	{"_class_opt_ipv6_ah", MAC_PROP_PERM_RW},
181 	{"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW}
182 };
183 
184 #define	HXGE_MAX_PRIV_PROPS	\
185 	(sizeof (hxge_priv_props)/sizeof (mac_priv_prop_t))
186 
187 #define	HXGE_MAGIC	0x4E584745UL
188 #define	MAX_DUMP_SZ 256
189 
190 #define	HXGE_M_CALLBACK_FLAGS	\
191 	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)
192 
193 extern mblk_t *hxge_m_tx(void *arg, mblk_t *mp);
194 extern hxge_status_t hxge_pfc_set_default_mac_addr(p_hxge_t hxgep);
195 
196 static mac_callbacks_t hxge_m_callbacks = {
197 	HXGE_M_CALLBACK_FLAGS,
198 	hxge_m_stat,
199 	hxge_m_start,
200 	hxge_m_stop,
201 	hxge_m_promisc,
202 	hxge_m_multicst,
203 	hxge_m_unicst,
204 	hxge_m_tx,
205 	hxge_m_resources,
206 	hxge_m_ioctl,
207 	hxge_m_getcapab,
208 	NULL,
209 	NULL,
210 	hxge_m_setprop,
211 	hxge_m_getprop
212 };
213 
214 /* Enable debug messages as necessary. */
215 uint64_t hxge_debug_level = 0;
216 
217 /*
218  * This list contains the instance structures for the Hydra
219  * devices present in the system. The lock exists to guarantee
220  * mutually exclusive access to the list.
221  */
222 void *hxge_list = NULL;
223 void *hxge_hw_list = NULL;
224 hxge_os_mutex_t hxge_common_lock;
225 
226 extern uint64_t hpi_debug_level;
227 
228 extern hxge_status_t hxge_ldgv_init();
229 extern hxge_status_t hxge_ldgv_uninit();
230 extern hxge_status_t hxge_intr_ldgv_init();
231 extern void hxge_fm_init(p_hxge_t hxgep, ddi_device_acc_attr_t *reg_attr,
232     ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr);
233 extern void hxge_fm_fini(p_hxge_t hxgep);
234 
235 /*
236  * Count of the buffers currently in use by Hydra instances or
237  * loaned up to the upper layers.
238  */
239 uint32_t hxge_mblks_pending = 0;
240 
241 /*
242  * Device register access attributes for PIO.
243  */
244 static ddi_device_acc_attr_t hxge_dev_reg_acc_attr = {
245 	DDI_DEVICE_ATTR_V0,
246 	DDI_STRUCTURE_LE_ACC,
247 	DDI_STRICTORDER_ACC,
248 };
249 
250 /*
251  * Device descriptor access attributes for DMA.
252  */
253 static ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr = {
254 	DDI_DEVICE_ATTR_V0,
255 	DDI_STRUCTURE_LE_ACC,
256 	DDI_STRICTORDER_ACC
257 };
258 
259 /*
260  * Device buffer access attributes for DMA.
261  */
262 static ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr = {
263 	DDI_DEVICE_ATTR_V0,
264 	DDI_STRUCTURE_BE_ACC,
265 	DDI_STRICTORDER_ACC
266 };
267 
268 ddi_dma_attr_t hxge_rx_rcr_desc_dma_attr = {
269 	DMA_ATTR_V0,		/* version number. */
270 	0,			/* low address */
271 	0xffffffffffffffff,	/* high address */
272 	0xffffffffffffffff,	/* address counter max */
273 	0x80000,		/* alignment */
274 	0xfc00fc,		/* dlim_burstsizes */
275 	0x1,			/* minimum transfer size */
276 	0xffffffffffffffff,	/* maximum transfer size */
277 	0xffffffffffffffff,	/* maximum segment size */
278 	1,			/* scatter/gather list length */
279 	(unsigned int)1,	/* granularity */
280 	0			/* attribute flags */
281 };
282 
283 ddi_dma_attr_t hxge_tx_desc_dma_attr = {
284 	DMA_ATTR_V0,		/* version number. */
285 	0,			/* low address */
286 	0xffffffffffffffff,	/* high address */
287 	0xffffffffffffffff,	/* address counter max */
288 	0x100000,		/* alignment */
289 	0xfc00fc,		/* dlim_burstsizes */
290 	0x1,			/* minimum transfer size */
291 	0xffffffffffffffff,	/* maximum transfer size */
292 	0xffffffffffffffff,	/* maximum segment size */
293 	1,			/* scatter/gather list length */
294 	(unsigned int)1,	/* granularity */
295 	0			/* attribute flags */
296 };
297 
298 ddi_dma_attr_t hxge_rx_rbr_desc_dma_attr = {
299 	DMA_ATTR_V0,		/* version number. */
300 	0,			/* low address */
301 	0xffffffffffffffff,	/* high address */
302 	0xffffffffffffffff,	/* address counter max */
303 	0x40000,		/* alignment */
304 	0xfc00fc,		/* dlim_burstsizes */
305 	0x1,			/* minimum transfer size */
306 	0xffffffffffffffff,	/* maximum transfer size */
307 	0xffffffffffffffff,	/* maximum segment size */
308 	1,			/* scatter/gather list length */
309 	(unsigned int)1,	/* granularity */
310 	0			/* attribute flags */
311 };
312 
313 ddi_dma_attr_t hxge_rx_mbox_dma_attr = {
314 	DMA_ATTR_V0,		/* version number. */
315 	0,			/* low address */
316 	0xffffffffffffffff,	/* high address */
317 	0xffffffffffffffff,	/* address counter max */
318 #if defined(_BIG_ENDIAN)
319 	0x2000,			/* alignment */
320 #else
321 	0x1000,			/* alignment */
322 #endif
323 	0xfc00fc,		/* dlim_burstsizes */
324 	0x1,			/* minimum transfer size */
325 	0xffffffffffffffff,	/* maximum transfer size */
326 	0xffffffffffffffff,	/* maximum segment size */
327 	5,			/* scatter/gather list length */
328 	(unsigned int)1,	/* granularity */
329 	0			/* attribute flags */
330 };
331 
332 ddi_dma_attr_t hxge_tx_dma_attr = {
333 	DMA_ATTR_V0,		/* version number. */
334 	0,			/* low address */
335 	0xffffffffffffffff,	/* high address */
336 	0xffffffffffffffff,	/* address counter max */
337 #if defined(_BIG_ENDIAN)
338 	0x2000,			/* alignment */
339 #else
340 	0x1000,			/* alignment */
341 #endif
342 	0xfc00fc,		/* dlim_burstsizes */
343 	0x1,			/* minimum transfer size */
344 	0xffffffffffffffff,	/* maximum transfer size */
345 	0xffffffffffffffff,	/* maximum segment size */
346 	5,			/* scatter/gather list length */
347 	(unsigned int)1,	/* granularity */
348 	0			/* attribute flags */
349 };
350 
351 ddi_dma_attr_t hxge_rx_dma_attr = {
352 	DMA_ATTR_V0,		/* version number. */
353 	0,			/* low address */
354 	0xffffffffffffffff,	/* high address */
355 	0xffffffffffffffff,	/* address counter max */
356 	0x10000,		/* alignment */
357 	0xfc00fc,		/* dlim_burstsizes */
358 	0x1,			/* minimum transfer size */
359 	0xffffffffffffffff,	/* maximum transfer size */
360 	0xffffffffffffffff,	/* maximum segment size */
361 	1,			/* scatter/gather list length */
362 	(unsigned int)1,	/* granularity */
363 	DDI_DMA_RELAXED_ORDERING /* attribute flags */
364 };
365 
366 ddi_dma_lim_t hxge_dma_limits = {
367 	(uint_t)0,		/* dlim_addr_lo */
368 	(uint_t)0xffffffff,	/* dlim_addr_hi */
369 	(uint_t)0xffffffff,	/* dlim_cntr_max */
370 	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
371 	0x1,			/* dlim_minxfer */
372 	1024			/* dlim_speed */
373 };
374 
375 dma_method_t hxge_force_dma = DVMA;
376 
377 /*
378  * DMA chunk sizes.
379  *
380  * Try to allocate the largest chunk size possible so that fewer
381  * DMA chunks need to be managed.
382  */
383 size_t alloc_sizes[] = {
384     0x1000, 0x2000, 0x4000, 0x8000,
385     0x10000, 0x20000, 0x40000, 0x80000,
386     0x100000, 0x200000, 0x400000, 0x800000, 0x1000000
387 };
388 
389 /*
390  * hxge_attach - attach(9E) entry point (DDI_ATTACH/DDI_RESUME/DDI_PM_RESUME).
391  */
392 static int
393 hxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
394 {
395 	p_hxge_t	hxgep = NULL;
396 	int		instance;
397 	int		status = DDI_SUCCESS;
398 
399 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_attach"));
400 
401 	/*
402 	 * Get the device instance since we'll need to set up or retrieve a soft
403 	 * state for this instance.
404 	 */
405 	instance = ddi_get_instance(dip);
406 
407 	switch (cmd) {
408 	case DDI_ATTACH:
409 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_ATTACH"));
410 		break;
411 
412 	case DDI_RESUME:
413 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_RESUME"));
414 		hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
415 		if (hxgep == NULL) {
416 			status = DDI_FAILURE;
417 			break;
418 		}
419 		if (hxgep->dip != dip) {
420 			status = DDI_FAILURE;
421 			break;
422 		}
423 		if (hxgep->suspended == DDI_PM_SUSPEND) {
424 			status = ddi_dev_is_needed(hxgep->dip, 0, 1);
425 		} else {
426 			(void) hxge_resume(hxgep);
427 		}
428 		goto hxge_attach_exit;
429 
430 	case DDI_PM_RESUME:
431 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_RESUME"));
432 		hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
433 		if (hxgep == NULL) {
434 			status = DDI_FAILURE;
435 			break;
436 		}
437 		if (hxgep->dip != dip) {
438 			status = DDI_FAILURE;
439 			break;
440 		}
441 		(void) hxge_resume(hxgep);
442 		goto hxge_attach_exit;
443 
444 	default:
445 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing unknown"));
446 		status = DDI_FAILURE;
447 		goto hxge_attach_exit;
448 	}
449 
450 	if (ddi_soft_state_zalloc(hxge_list, instance) == DDI_FAILURE) {
451 		status = DDI_FAILURE;
452 		HXGE_ERROR_MSG((hxgep, DDI_CTL,
453 		    "ddi_soft_state_zalloc failed"));
454 		goto hxge_attach_exit;
455 	}
456 
457 	hxgep = ddi_get_soft_state(hxge_list, instance);
458 	if (hxgep == NULL) {
459 		status = HXGE_ERROR;
460 		HXGE_ERROR_MSG((hxgep, DDI_CTL,
461 		    "ddi_get_soft_state failed"));
462 		goto hxge_attach_fail2;
463 	}
464 
465 	hxgep->drv_state = 0;
466 	hxgep->dip = dip;
467 	hxgep->instance = instance;
468 	hxgep->p_dip = ddi_get_parent(dip);
469 	hxgep->hxge_debug_level = hxge_debug_level;
470 	hpi_debug_level = hxge_debug_level;
471 
472 	hxge_fm_init(hxgep, &hxge_dev_reg_acc_attr, &hxge_dev_desc_dma_acc_attr,
473 	    &hxge_rx_dma_attr);
474 
475 	status = hxge_map_regs(hxgep);
476 	if (status != HXGE_OK) {
477 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_map_regs failed"));
478 		goto hxge_attach_fail3;
479 	}
480 
481 	status = hxge_init_common_dev(hxgep);
482 	if (status != HXGE_OK) {
483 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
484 		    "hxge_init_common_dev failed"));
485 		goto hxge_attach_fail4;
486 	}
487 
488 	/*
489 	 * Set up the NDD parameters for this instance.
490 	 */
491 	hxge_init_param(hxgep);
492 
493 	/*
494 	 * Set up the register tracing buffer.
495 	 */
496 	hpi_rtrace_buf_init((rtrace_t *)&hpi_rtracebuf);
497 
498 	/* init stats ptr */
499 	hxge_init_statsp(hxgep);
500 
501 	status = hxge_setup_mutexes(hxgep);
502 	if (status != HXGE_OK) {
503 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set mutex failed"));
504 		goto hxge_attach_fail;
505 	}
506 
507 	status = hxge_get_config_properties(hxgep);
508 	if (status != HXGE_OK) {
509 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "get_hw create failed"));
510 		goto hxge_attach_fail;
511 	}
512 
513 	/*
514 	 * Set up the kstats for the driver.
515 	 */
516 	hxge_setup_kstats(hxgep);
517 	hxge_setup_param(hxgep);
518 
519 	status = hxge_setup_system_dma_pages(hxgep);
520 	if (status != HXGE_OK) {
521 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "set dma page failed"));
522 		goto hxge_attach_fail;
523 	}
524 
525 	hxge_hw_id_init(hxgep);
526 	hxge_hw_init_niu_common(hxgep);
527 
528 	status = hxge_setup_dev(hxgep);
529 	if (status != DDI_SUCCESS) {
530 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set dev failed"));
531 		goto hxge_attach_fail;
532 	}
533 
534 	status = hxge_add_intrs(hxgep);
535 	if (status != DDI_SUCCESS) {
536 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "add_intr failed"));
537 		goto hxge_attach_fail;
538 	}
539 
540 	status = hxge_add_soft_intrs(hxgep);
541 	if (status != DDI_SUCCESS) {
542 		HXGE_DEBUG_MSG((hxgep, HXGE_ERR_CTL, "add_soft_intr failed"));
543 		goto hxge_attach_fail;
544 	}
545 
546 	/*
547 	 * Enable interrupts.
548 	 */
549 	hxge_intrs_enable(hxgep);
550 
551 	if ((status = hxge_mac_register(hxgep)) != HXGE_OK) {
552 		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
553 		    "unable to register to mac layer (%d)", status));
554 		goto hxge_attach_fail;
555 	}
556 	mac_link_update(hxgep->mach, LINK_STATE_UNKNOWN);
557 	hxgep->timeout.link_status = 0;
558 	hxgep->timeout.ticks = drv_usectohz(2 * 1000000);
559 
560 	/* Start the link status timer to check the link status */
561 	MUTEX_ENTER(&hxgep->timeout.lock);
562 	hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
563 	    hxgep->timeout.ticks);
564 	MUTEX_EXIT(&hxgep->timeout.lock);
565 
566 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "registered to mac (instance %d)",
567 	    instance));
568 
569 	goto hxge_attach_exit;
570 
571 hxge_attach_fail:
572 	hxge_unattach(hxgep);
573 	goto hxge_attach_fail1;
574 
575 hxge_attach_fail5:
576 	/*
577 	 * Tear down the ndd parameters setup.
578 	 */
579 	hxge_destroy_param(hxgep);
580 
581 	/*
582 	 * Tear down the kstat setup.
583 	 */
584 	hxge_destroy_kstats(hxgep);
585 
586 hxge_attach_fail4:
587 	if (hxgep->hxge_hw_p) {
588 		hxge_uninit_common_dev(hxgep);
589 		hxgep->hxge_hw_p = NULL;
590 	}
591 hxge_attach_fail3:
592 	/*
593 	 * Unmap the register setup.
594 	 */
595 	hxge_unmap_regs(hxgep);
596 
597 	hxge_fm_fini(hxgep);
598 
599 hxge_attach_fail2:
600 	ddi_soft_state_free(hxge_list, instance);
601 
602 hxge_attach_fail1:
603 	if (status != HXGE_OK)
604 		status = (HXGE_ERROR | HXGE_DDI_FAILED);
605 	hxgep = NULL;
606 
607 hxge_attach_exit:
608 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_attach status = 0x%08x",
609 	    status));
610 
611 	return (status);
612 }
613 
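/*
 * hxge_detach - detach(9E) entry point; handles DDI_DETACH as well as
 * DDI_SUSPEND and DDI_PM_SUSPEND.
 */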
614 static int
615 hxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
616 {
617 	int		status = DDI_SUCCESS;
618 	int		instance;
619 	p_hxge_t	hxgep = NULL;
620 
621 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_detach"));
622 	instance = ddi_get_instance(dip);
623 	hxgep = ddi_get_soft_state(hxge_list, instance);
624 	if (hxgep == NULL) {
625 		status = DDI_FAILURE;
626 		goto hxge_detach_exit;
627 	}
628 
629 	switch (cmd) {
630 	case DDI_DETACH:
631 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_DETACH"));
632 		break;
633 
634 	case DDI_PM_SUSPEND:
635 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
636 		hxgep->suspended = DDI_PM_SUSPEND;
637 		hxge_suspend(hxgep);
638 		break;
639 
640 	case DDI_SUSPEND:
641 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_SUSPEND"));
642 		if (hxgep->suspended != DDI_PM_SUSPEND) {
643 			hxgep->suspended = DDI_SUSPEND;
644 			hxge_suspend(hxgep);
645 		}
646 		break;
647 
648 	default:
649 		status = DDI_FAILURE;
650 		break;
651 	}
652 
653 	if (cmd != DDI_DETACH)
654 		goto hxge_detach_exit;
655 
656 	/*
657 	 * Stop the xcvr polling.
658 	 */
659 	hxgep->suspended = cmd;
660 
661 	/* Stop the link status timer before unregistering */
662 	MUTEX_ENTER(&hxgep->timeout.lock);
663 	if (hxgep->timeout.id)
664 		(void) untimeout(hxgep->timeout.id);
665 	MUTEX_EXIT(&hxgep->timeout.lock);
666 
667 	if (hxgep->mach && (status = mac_unregister(hxgep->mach)) != 0) {
668 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
669 		    "<== hxge_detach status = 0x%08X", status));
670 		return (DDI_FAILURE);
671 	}
672 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
673 	    "<== hxge_detach (mac_unregister) status = 0x%08X", status));
674 
675 	hxge_unattach(hxgep);
676 	hxgep = NULL;
677 
678 hxge_detach_exit:
679 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_detach status = 0x%08X",
680 	    status));
681 
682 	return (status);
683 }
684 
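/*
 * Undo hxge_attach(): cancel timers, remove interrupts, stop the
 * device, tear down the kstat and ndd parameter setup, reset the
 * hardware blocks, unmap the registers and free the soft state.
 */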
685 static void
686 hxge_unattach(p_hxge_t hxgep)
687 {
688 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unattach"));
689 
690 	if (hxgep == NULL || hxgep->dev_regs == NULL) {
691 		return;
692 	}
693 
694 	if (hxgep->hxge_hw_p) {
695 		hxge_uninit_common_dev(hxgep);
696 		hxgep->hxge_hw_p = NULL;
697 	}
698 
699 	if (hxgep->hxge_timerid) {
700 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
701 		hxgep->hxge_timerid = 0;
702 	}
703 
704 	/* Stop any further interrupts. */
705 	hxge_remove_intrs(hxgep);
706 
707 	/* Remove soft interrupts */
708 	hxge_remove_soft_intrs(hxgep);
709 
710 	/* Stop the device and free resources. */
711 	hxge_destroy_dev(hxgep);
712 
713 	/* Tear down the ndd parameters setup. */
714 	hxge_destroy_param(hxgep);
715 
716 	/* Tear down the kstat setup. */
717 	hxge_destroy_kstats(hxgep);
718 
719 	/*
720 	 * Remove the list of ndd parameters which were setup during attach.
721 	 */
722 	if (hxgep->dip) {
723 		HXGE_DEBUG_MSG((hxgep, OBP_CTL,
724 		    " hxge_unattach: remove all properties"));
725 		(void) ddi_prop_remove_all(hxgep->dip);
726 	}
727 
728 	/*
729 	 * Reset RDC, TDC, PFC, and VMAC blocks from PEU to clear any
730 	 * previous state before unmapping the registers.
731 	 */
732 	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, 0x0000001E);
733 	HXGE_DELAY(1000);
734 
735 	/*
736 	 * Unmap the register setup.
737 	 */
738 	hxge_unmap_regs(hxgep);
739 
740 	hxge_fm_fini(hxgep);
741 
742 	/* Destroy all mutexes.  */
743 	hxge_destroy_mutexes(hxgep);
744 
745 	/*
746 	 * Free the soft state data structures allocated with this instance.
747 	 */
748 	ddi_soft_state_free(hxge_list, hxgep->instance);
749 
750 	HXGE_DEBUG_MSG((NULL, DDI_CTL, "<== hxge_unattach"));
751 }
752 
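/*
 * Map the three Hydra register sets: register set 0 is the PCI
 * configuration space, set 1 the device (PIO) registers, and set 2
 * the MSI/MSI-X registers.
 */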
753 static hxge_status_t
754 hxge_map_regs(p_hxge_t hxgep)
755 {
756 	int		ddi_status = DDI_SUCCESS;
757 	p_dev_regs_t	dev_regs;
758 
759 #ifdef	HXGE_DEBUG
760 	char		*sysname;
761 #endif
762 
763 	off_t		regsize;
764 	hxge_status_t	status = HXGE_OK;
765 	int		nregs;
766 
767 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_map_regs"));
768 
769 	if (ddi_dev_nregs(hxgep->dip, &nregs) != DDI_SUCCESS)
770 		return (HXGE_ERROR);
771 
772 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_regs: nregs: %d", nregs));
773 
774 	hxgep->dev_regs = NULL;
775 	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
776 	dev_regs->hxge_regh = NULL;
777 	dev_regs->hxge_pciregh = NULL;
778 	dev_regs->hxge_msix_regh = NULL;
779 
780 	(void) ddi_dev_regsize(hxgep->dip, 0, &regsize);
781 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
782 	    "hxge_map_regs: pci config size 0x%x", regsize));
783 
784 	ddi_status = ddi_regs_map_setup(hxgep->dip, 0,
785 	    (caddr_t *)&(dev_regs->hxge_pciregp), 0, 0,
786 	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_pciregh);
787 	if (ddi_status != DDI_SUCCESS) {
788 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
789 		    "ddi_map_regs, hxge bus config regs failed"));
790 		goto hxge_map_regs_fail0;
791 	}
792 
793 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
794 	    "hxge_map_reg: PCI config addr 0x%0llx handle 0x%0llx",
795 	    dev_regs->hxge_pciregp,
796 	    dev_regs->hxge_pciregh));
797 
798 	(void) ddi_dev_regsize(hxgep->dip, 1, &regsize);
799 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
800 	    "hxge_map_regs: pio size 0x%x", regsize));
801 
802 	/* set up the device mapped register */
803 	ddi_status = ddi_regs_map_setup(hxgep->dip, 1,
804 	    (caddr_t *)&(dev_regs->hxge_regp), 0, 0,
805 	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_regh);
806 
807 	if (ddi_status != DDI_SUCCESS) {
808 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
809 		    "ddi_map_regs for Hydra global reg failed"));
810 		goto hxge_map_regs_fail1;
811 	}
812 
813 	/* set up the msi/msi-x mapped register */
814 	(void) ddi_dev_regsize(hxgep->dip, 2, &regsize);
815 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
816 	    "hxge_map_regs: msix size 0x%x", regsize));
817 
818 	ddi_status = ddi_regs_map_setup(hxgep->dip, 2,
819 	    (caddr_t *)&(dev_regs->hxge_msix_regp), 0, 0,
820 	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_msix_regh);
821 
822 	if (ddi_status != DDI_SUCCESS) {
823 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
824 		    "ddi_map_regs for msi reg failed"));
825 		goto hxge_map_regs_fail2;
826 	}
827 
828 	hxgep->dev_regs = dev_regs;
829 
830 	HPI_PCI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_pciregh);
831 	HPI_PCI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_pciregp);
832 	HPI_MSI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_msix_regh);
833 	HPI_MSI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_msix_regp);
834 
835 	HPI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
836 	HPI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
837 
838 	HPI_REG_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
839 	HPI_REG_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
840 
841 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_reg: hardware addr 0x%0llx "
842 	    " handle 0x%0llx", dev_regs->hxge_regp, dev_regs->hxge_regh));
843 
844 	goto hxge_map_regs_exit;
845 
846 hxge_map_regs_fail3:
847 	if (dev_regs->hxge_msix_regh) {
848 		ddi_regs_map_free(&dev_regs->hxge_msix_regh);
849 	}
850 
851 hxge_map_regs_fail2:
852 	if (dev_regs->hxge_regh) {
853 		ddi_regs_map_free(&dev_regs->hxge_regh);
854 	}
855 
856 hxge_map_regs_fail1:
857 	if (dev_regs->hxge_pciregh) {
858 		ddi_regs_map_free(&dev_regs->hxge_pciregh);
859 	}
860 
861 hxge_map_regs_fail0:
862 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "Freeing register set memory"));
863 	kmem_free(dev_regs, sizeof (dev_regs_t));
864 
865 hxge_map_regs_exit:
866 	if (ddi_status != DDI_SUCCESS)
867 		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
868 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_map_regs"));
869 	return (status);
870 }
871 
872 static void
873 hxge_unmap_regs(p_hxge_t hxgep)
874 {
875 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unmap_regs"));
876 	if (hxgep->dev_regs) {
877 		if (hxgep->dev_regs->hxge_pciregh) {
878 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
879 			    "==> hxge_unmap_regs: bus"));
880 			ddi_regs_map_free(&hxgep->dev_regs->hxge_pciregh);
881 			hxgep->dev_regs->hxge_pciregh = NULL;
882 		}
883 
884 		if (hxgep->dev_regs->hxge_regh) {
885 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
886 			    "==> hxge_unmap_regs: device registers"));
887 			ddi_regs_map_free(&hxgep->dev_regs->hxge_regh);
888 			hxgep->dev_regs->hxge_regh = NULL;
889 		}
890 
891 		if (hxgep->dev_regs->hxge_msix_regh) {
892 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
893 			    "==> hxge_unmap_regs: device interrupts"));
894 			ddi_regs_map_free(&hxgep->dev_regs->hxge_msix_regh);
895 			hxgep->dev_regs->hxge_msix_regh = NULL;
896 		}
897 		kmem_free(hxgep->dev_regs, sizeof (dev_regs_t));
898 		hxgep->dev_regs = NULL;
899 	}
900 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_unmap_regs"));
901 }
902 
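/*
 * Create the per-instance locks, initialized with the interrupt block
 * cookie so that they may be acquired from interrupt context.
 */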
903 static hxge_status_t
904 hxge_setup_mutexes(p_hxge_t hxgep)
905 {
906 	int		ddi_status = DDI_SUCCESS;
907 	hxge_status_t	status = HXGE_OK;
908 
909 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_mutexes"));
910 
911 	/*
912 	 * Get the interrupt block cookie so the mutexes can be initialized.
913 	 */
914 	ddi_status = ddi_get_iblock_cookie(hxgep->dip, 0,
915 	    &hxgep->interrupt_cookie);
916 
917 	if (ddi_status != DDI_SUCCESS) {
918 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
919 		    "<== hxge_setup_mutexes: failed 0x%x", ddi_status));
920 		goto hxge_setup_mutexes_exit;
921 	}
922 
923 	/*
924 	 * Initialize the mutexes for this device.
925 	 */
926 	MUTEX_INIT(hxgep->genlock, NULL,
927 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
928 	MUTEX_INIT(&hxgep->ouraddr_lock, NULL,
929 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
930 	RW_INIT(&hxgep->filter_lock, NULL,
931 	    RW_DRIVER, (void *) hxgep->interrupt_cookie);
932 	MUTEX_INIT(&hxgep->pio_lock, NULL,
933 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
934 	MUTEX_INIT(&hxgep->timeout.lock, NULL,
935 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
936 
937 hxge_setup_mutexes_exit:
938 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
939 	    "<== hxge_setup_mutexes status = %x", status));
940 
941 	if (ddi_status != DDI_SUCCESS)
942 		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
943 
944 	return (status);
945 }
946 
947 static void
948 hxge_destroy_mutexes(p_hxge_t hxgep)
949 {
950 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_mutexes"));
951 	RW_DESTROY(&hxgep->filter_lock);
952 	MUTEX_DESTROY(&hxgep->ouraddr_lock);
953 	MUTEX_DESTROY(hxgep->genlock);
954 	MUTEX_DESTROY(&hxgep->pio_lock);
955 	MUTEX_DESTROY(&hxgep->timeout.lock);
956 
957 	if (hxge_debug_init == 1) {
958 		MUTEX_DESTROY(&hxgedebuglock);
959 		hxge_debug_init = 0;
960 	}
961 
962 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_mutexes"));
963 }
964 
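/*
 * One-time hardware initialization: allocate the DMA memory pools,
 * bring up the TX/RX DMA channels, the classifier (TCAM) and the
 * VMAC, then enable interrupts.  STATE_HW_INITIALIZED makes repeated
 * calls no-ops.
 */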
965 hxge_status_t
966 hxge_init(p_hxge_t hxgep)
967 {
968 	hxge_status_t status = HXGE_OK;
969 
970 	HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_init"));
971 
972 	if (hxgep->drv_state & STATE_HW_INITIALIZED) {
973 		return (status);
974 	}
975 
976 	/*
977 	 * Allocate system memory for the receive/transmit buffer blocks and
978 	 * receive/transmit descriptor rings.
979 	 */
980 	status = hxge_alloc_mem_pool(hxgep);
981 	if (status != HXGE_OK) {
982 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "alloc mem failed\n"));
983 		goto hxge_init_fail1;
984 	}
985 
986 	/*
987 	 * Initialize and enable TXDMA channels.
988 	 */
989 	status = hxge_init_txdma_channels(hxgep);
990 	if (status != HXGE_OK) {
991 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init txdma failed\n"));
992 		goto hxge_init_fail3;
993 	}
994 
995 	/*
996 	 * Initialize and enable RXDMA channels.
997 	 */
998 	status = hxge_init_rxdma_channels(hxgep);
999 	if (status != HXGE_OK) {
1000 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init rxdma failed\n"));
1001 		goto hxge_init_fail4;
1002 	}
1003 
1004 	/*
1005 	 * Initialize TCAM
1006 	 */
1007 	status = hxge_classify_init(hxgep);
1008 	if (status != HXGE_OK) {
1009 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init classify failed\n"));
1010 		goto hxge_init_fail5;
1011 	}
1012 
1013 	/*
1014 	 * Initialize the VMAC block.
1015 	 */
1016 	status = hxge_vmac_init(hxgep);
1017 	if (status != HXGE_OK) {
1018 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init MAC failed\n"));
1019 		goto hxge_init_fail5;
1020 	}
1021 
1022 	/* Bringup: this may be unnecessary once PXE and FCode are available */
1023 	status = hxge_pfc_set_default_mac_addr(hxgep);
1024 	if (status != HXGE_OK) {
1025 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1026 		    "Default Address Failure\n"));
1027 		goto hxge_init_fail5;
1028 	}
1029 
1030 	hxge_intrs_enable(hxgep);
1031 
1032 	/*
1033 	 * Enable hardware interrupts.
1034 	 */
1035 	hxge_intr_hw_enable(hxgep);
1036 	hxgep->drv_state |= STATE_HW_INITIALIZED;
1037 
1038 	goto hxge_init_exit;
1039 
1040 hxge_init_fail5:
1041 	hxge_uninit_rxdma_channels(hxgep);
1042 hxge_init_fail4:
1043 	hxge_uninit_txdma_channels(hxgep);
1044 hxge_init_fail3:
1045 	hxge_free_mem_pool(hxgep);
1046 hxge_init_fail1:
1047 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1048 	    "<== hxge_init status (failed) = 0x%08x", status));
1049 	return (status);
1050 
1051 hxge_init_exit:
1052 
1053 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_init status = 0x%08x",
1054 	    status));
1055 
1056 	return (status);
1057 }
1058 
1059 timeout_id_t
1060 hxge_start_timer(p_hxge_t hxgep, fptrv_t func, int msec)
1061 {
1062 	if ((hxgep->suspended == 0) || (hxgep->suspended == DDI_RESUME)) {
1063 		return (timeout(func, (caddr_t)hxgep,
1064 		    drv_usectohz(1000 * msec)));
1065 	}
1066 	return (NULL);
1067 }
1068 
1069 /*ARGSUSED*/
1070 void
1071 hxge_stop_timer(p_hxge_t hxgep, timeout_id_t timerid)
1072 {
1073 	if (timerid) {
1074 		(void) untimeout(timerid);
1075 	}
1076 }
1077 
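/*
 * Undo hxge_init(): disable interrupts, quiesce the VMAC and the
 * TX/RX DMA channels, release classification resources and free the
 * DMA memory pools.
 */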
1078 void
1079 hxge_uninit(p_hxge_t hxgep)
1080 {
1081 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_uninit"));
1082 
1083 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
1084 		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1085 		    "==> hxge_uninit: not initialized"));
1086 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
1087 		return;
1088 	}
1089 
1090 	/* Stop timer */
1091 	if (hxgep->hxge_timerid) {
1092 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
1093 		hxgep->hxge_timerid = 0;
1094 	}
1095 
1096 	(void) hxge_intr_hw_disable(hxgep);
1097 
1098 	/* Reset the receive VMAC side.  */
1099 	(void) hxge_rx_vmac_disable(hxgep);
1100 
1101 	/* Free classification resources */
1102 	(void) hxge_classify_uninit(hxgep);
1103 
1104 	/* Reset the transmit/receive DMA side.  */
1105 	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
1106 	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);
1107 
1108 	hxge_uninit_txdma_channels(hxgep);
1109 	hxge_uninit_rxdma_channels(hxgep);
1110 
1111 	/* Reset the transmit VMAC side.  */
1112 	(void) hxge_tx_vmac_disable(hxgep);
1113 
1114 	hxge_free_mem_pool(hxgep);
1115 
1116 	hxgep->drv_state &= ~STATE_HW_INITIALIZED;
1117 
1118 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
1119 }
1120 
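/*
 * ioctl helper: the mblk carries a register offset on input; the
 * 64-bit register value read is copied back into the same mblk.
 */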
1121 void
1122 hxge_get64(p_hxge_t hxgep, p_mblk_t mp)
1123 {
1124 #if defined(__i386)
1125 	size_t		reg;
1126 #else
1127 	uint64_t	reg;
1128 #endif
1129 	uint64_t	regdata;
1130 	int		i, retry;
1131 
1132 	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
1133 	regdata = 0;
1134 	retry = 1;
1135 
1136 	for (i = 0; i < retry; i++) {
1137 		HXGE_REG_RD64(hxgep->hpi_handle, reg, &regdata);
1138 	}
1139 	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
1140 }
1141 
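/*
 * ioctl helper: the mblk carries a register offset followed by the
 * 64-bit value to write to that register.
 */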
1142 void
1143 hxge_put64(p_hxge_t hxgep, p_mblk_t mp)
1144 {
1145 #if defined(__i386)
1146 	size_t		reg;
1147 #else
1148 	uint64_t	reg;
1149 #endif
1150 	uint64_t	buf[2];
1151 
1152 	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
1153 #if defined(__i386)
1154 	reg = (size_t)buf[0];
1155 #else
1156 	reg = buf[0];
1157 #endif
1158 
1159 	HXGE_HPI_PIO_WRITE64(hxgep->hpi_handle, reg, buf[1]);
1160 }
1161 
1162 /*ARGSUSED*/
1163 /*VARARGS*/
1164 void
1165 hxge_debug_msg(p_hxge_t hxgep, uint64_t level, char *fmt, ...)
1166 {
1167 	char		msg_buffer[1048];
1168 	char		prefix_buffer[32];
1169 	int		instance;
1170 	uint64_t	debug_level;
1171 	int		cmn_level = CE_CONT;
1172 	va_list		ap;
1173 
1174 	debug_level = (hxgep == NULL) ? hxge_debug_level :
1175 	    hxgep->hxge_debug_level;
1176 
1177 	if ((level & debug_level) || (level == HXGE_NOTE) ||
1178 	    (level == HXGE_ERR_CTL)) {
1179 		/* do the msg processing */
1180 		if (hxge_debug_init == 0) {
1181 			MUTEX_INIT(&hxgedebuglock, NULL, MUTEX_DRIVER, NULL);
1182 			hxge_debug_init = 1;
1183 		}
1184 
1185 		MUTEX_ENTER(&hxgedebuglock);
1186 
1187 		if ((level & HXGE_NOTE)) {
1188 			cmn_level = CE_NOTE;
1189 		}
1190 
1191 		if (level & HXGE_ERR_CTL) {
1192 			cmn_level = CE_WARN;
1193 		}
1194 
1195 		va_start(ap, fmt);
1196 		(void) vsprintf(msg_buffer, fmt, ap);
1197 		va_end(ap);
1198 
1199 		if (hxgep == NULL) {
1200 			instance = -1;
1201 			(void) sprintf(prefix_buffer, "%s :", "hxge");
1202 		} else {
1203 			instance = hxgep->instance;
1204 			(void) sprintf(prefix_buffer,
1205 			    "%s%d :", "hxge", instance);
1206 		}
1207 
1208 		MUTEX_EXIT(&hxgedebuglock);
1209 		cmn_err(cmn_level, "%s %s\n", prefix_buffer, msg_buffer);
1210 	}
1211 }
1212 
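/*
 * Format a packet as colon-separated hex into a static buffer.  Packets
 * larger than MAX_DUMP_SZ are dumped head and tail with a run of dots
 * in between.  Uses a static buffer, so calls are not reentrant.
 */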
1213 char *
1214 hxge_dump_packet(char *addr, int size)
1215 {
1216 	uchar_t		*ap = (uchar_t *)addr;
1217 	int		i;
1218 	static char	etherbuf[1024];
1219 	char		*cp = etherbuf;
1220 	char		digits[] = "0123456789abcdef";
1221 
1222 	if (!size)
1223 		size = 60;
1224 
1225 	if (size > MAX_DUMP_SZ) {
1226 		/* Dump the leading bytes */
1227 		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1228 			if (*ap > 0x0f)
1229 				*cp++ = digits[*ap >> 4];
1230 			*cp++ = digits[*ap++ & 0xf];
1231 			*cp++ = ':';
1232 		}
1233 		for (i = 0; i < 20; i++)
1234 			*cp++ = '.';
1235 		/* Dump the last MAX_DUMP_SZ/2 bytes */
1236 		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
1237 		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1238 			if (*ap > 0x0f)
1239 				*cp++ = digits[*ap >> 4];
1240 			*cp++ = digits[*ap++ & 0xf];
1241 			*cp++ = ':';
1242 		}
1243 	} else {
1244 		for (i = 0; i < size; i++) {
1245 			if (*ap > 0x0f)
1246 				*cp++ = digits[*ap >> 4];
1247 			*cp++ = digits[*ap++ & 0xf];
1248 			*cp++ = ':';
1249 		}
1250 	}
1251 	*--cp = 0;
1252 	return (etherbuf);
1253 }
1254 
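/*
 * Quiesce the device for DDI_SUSPEND/DDI_PM_SUSPEND: disable
 * interrupts, stop the hardware and cancel the link status timer.
 */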
1255 static void
1256 hxge_suspend(p_hxge_t hxgep)
1257 {
1258 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_suspend"));
1259 
1260 	hxge_intrs_disable(hxgep);
1261 	hxge_destroy_dev(hxgep);
1262 
1263 	/* Stop the link status timer */
1264 	MUTEX_ENTER(&hxgep->timeout.lock);
1265 	if (hxgep->timeout.id)
1266 		(void) untimeout(hxgep->timeout.id);
1267 	MUTEX_EXIT(&hxgep->timeout.lock);
1268 
1269 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_suspend"));
1270 }
1271 
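/*
 * Restart the TX/RX DMA channels, the VMAC and interrupts after a
 * suspend, then re-arm the link status timer.
 */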
1272 static hxge_status_t
1273 hxge_resume(p_hxge_t hxgep)
1274 {
1275 	hxge_status_t status = HXGE_OK;
1276 
1277 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_resume"));
1278 	hxgep->suspended = DDI_RESUME;
1279 
1280 	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_START);
1281 	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_START);
1282 
1283 	(void) hxge_rx_vmac_enable(hxgep);
1284 	(void) hxge_tx_vmac_enable(hxgep);
1285 
1286 	hxge_intrs_enable(hxgep);
1287 
1288 	hxgep->suspended = 0;
1289 
1290 	/* Resume the link status timer */
1291 	MUTEX_ENTER(&hxgep->timeout.lock);
1292 	hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
1293 	    hxgep->timeout.ticks);
1294 	MUTEX_EXIT(&hxgep->timeout.lock);
1295 
1296 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1297 	    "<== hxge_resume status = 0x%x", status));
1298 
1299 	return (status);
1300 }
1301 
1302 hxge_status_t
1303 hxge_setup_dev(p_hxge_t hxgep)
1304 {
1305 	hxge_status_t status = HXGE_OK;
1306 
1307 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_dev"));
1308 
1309 	status = hxge_link_init(hxgep);
1310 	if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) {
1311 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1312 		    "Bad register acc handle"));
1313 		status = HXGE_ERROR;
1314 	}
1315 
1316 	if (status != HXGE_OK) {
1317 		HXGE_DEBUG_MSG((hxgep, MAC_CTL,
1318 		    " hxge_setup_dev status (link init 0x%08x)", status));
1319 		goto hxge_setup_dev_exit;
1320 	}
1321 
1322 hxge_setup_dev_exit:
1323 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1324 	    "<== hxge_setup_dev status = 0x%08x", status));
1325 
1326 	return (status);
1327 }
1328 
1329 static void
1330 hxge_destroy_dev(p_hxge_t hxgep)
1331 {
1332 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_dev"));
1333 
1334 	(void) hxge_hw_stop(hxgep);
1335 
1336 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_dev"));
1337 }
1338 
1339 static hxge_status_t
1340 hxge_setup_system_dma_pages(p_hxge_t hxgep)
1341 {
1342 	int			ddi_status = DDI_SUCCESS;
1343 	uint_t			count;
1344 	ddi_dma_cookie_t	cookie;
1345 	uint_t			iommu_pagesize;
1346 	hxge_status_t		status = HXGE_OK;
1347 
1348 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_system_dma_pages"));
1349 
1350 	hxgep->sys_page_sz = ddi_ptob(hxgep->dip, (ulong_t)1);
1351 	iommu_pagesize = dvma_pagesize(hxgep->dip);
1352 
1353 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1354 	    " hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1355 	    " default_block_size %d iommu_pagesize %d",
1356 	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1357 	    hxgep->rx_default_block_size, iommu_pagesize));
1358 
1359 	if (iommu_pagesize != 0) {
1360 		if (hxgep->sys_page_sz == iommu_pagesize) {
1361 			/* Hydra supports pages of up to 8K */
1362 			if (iommu_pagesize > 0x2000)
1363 				hxgep->sys_page_sz = 0x2000;
1364 		} else {
1365 			if (hxgep->sys_page_sz > iommu_pagesize)
1366 				hxgep->sys_page_sz = iommu_pagesize;
1367 		}
1368 	}
1369 
1370 	hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1371 
1372 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1373 	    "==> hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1374 	    "default_block_size %d page mask %d",
1375 	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1376 	    hxgep->rx_default_block_size, hxgep->sys_page_mask));
1377 
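	/*
	 * Map the (possibly IOMMU-clamped) system page size to the RBR
	 * block size code; anything other than 4K or 8K falls back to 4K.
	 */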
1378 	switch (hxgep->sys_page_sz) {
1379 	default:
1380 		hxgep->sys_page_sz = 0x1000;
1381 		hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1382 		hxgep->rx_default_block_size = 0x1000;
1383 		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1384 		break;
1385 	case 0x1000:
1386 		hxgep->rx_default_block_size = 0x1000;
1387 		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1388 		break;
1389 	case 0x2000:
1390 		hxgep->rx_default_block_size = 0x2000;
1391 		hxgep->rx_bksize_code = RBR_BKSIZE_8K;
1392 		break;
1393 	}
1394 
1395 	hxge_rx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1396 	hxge_tx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1397 
1398 	/*
1399 	 * Get the system DMA burst size.
1400 	 */
1401 	ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr,
1402 	    DDI_DMA_DONTWAIT, 0, &hxgep->dmasparehandle);
1403 	if (ddi_status != DDI_SUCCESS) {
1404 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1405 		    "ddi_dma_alloc_handle: failed status 0x%x", ddi_status));
1406 		goto hxge_get_soft_properties_exit;
1407 	}
1408 
1409 	ddi_status = ddi_dma_addr_bind_handle(hxgep->dmasparehandle, NULL,
1410 	    (caddr_t)hxgep->dmasparehandle, sizeof (hxgep->dmasparehandle),
1411 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
1412 	    &cookie, &count);
1413 	if (ddi_status != DDI_DMA_MAPPED) {
1414 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1415 		    "Binding spare handle to find system burstsize failed."));
1416 		ddi_status = DDI_FAILURE;
1417 		goto hxge_get_soft_properties_fail1;
1418 	}
1419 
1420 	hxgep->sys_burst_sz = ddi_dma_burstsizes(hxgep->dmasparehandle);
1421 	(void) ddi_dma_unbind_handle(hxgep->dmasparehandle);
1422 
1423 hxge_get_soft_properties_fail1:
1424 	ddi_dma_free_handle(&hxgep->dmasparehandle);
1425 
1426 hxge_get_soft_properties_exit:
1427 
1428 	if (ddi_status != DDI_SUCCESS)
1429 		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
1430 
1431 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1432 	    "<== hxge_setup_system_dma_pages status = 0x%08x", status));
1433 
1434 	return (status);
1435 }
1436 
1437 hxge_status_t
1438 hxge_alloc_mem_pool(p_hxge_t hxgep)
1439 {
1440 	hxge_status_t status = HXGE_OK;
1441 
1442 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_alloc_mem_pool"));
1443 
1444 	status = hxge_alloc_rx_mem_pool(hxgep);
1445 	if (status != HXGE_OK) {
1446 		return (HXGE_ERROR);
1447 	}
1448 
1449 	status = hxge_alloc_tx_mem_pool(hxgep);
1450 	if (status != HXGE_OK) {
1451 		hxge_free_rx_mem_pool(hxgep);
1452 		return (HXGE_ERROR);
1453 	}
1454 
1455 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_alloc_mem_pool"));
1456 	return (HXGE_OK);
1457 }
1458 
1459 static void
1460 hxge_free_mem_pool(p_hxge_t hxgep)
1461 {
1462 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_mem_pool"));
1463 
1464 	hxge_free_rx_mem_pool(hxgep);
1465 	hxge_free_tx_mem_pool(hxgep);
1466 
1467 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_mem_pool"));
1468 }
1469 
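/*
 * For each receive DMA channel, allocate the data buffer chunks and
 * the three cache-aligned control areas: the RBR descriptor ring, the
 * RCR completion ring and the mailbox.
 */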
1470 static hxge_status_t
1471 hxge_alloc_rx_mem_pool(p_hxge_t hxgep)
1472 {
1473 	int			i, j;
1474 	uint32_t		ndmas, st_rdc;
1475 	p_hxge_dma_pt_cfg_t	p_all_cfgp;
1476 	p_hxge_hw_pt_cfg_t	p_cfgp;
1477 	p_hxge_dma_pool_t	dma_poolp;
1478 	p_hxge_dma_common_t	*dma_buf_p;
1479 	p_hxge_dma_pool_t	dma_rbr_cntl_poolp;
1480 	p_hxge_dma_common_t	*dma_rbr_cntl_p;
1481 	p_hxge_dma_pool_t	dma_rcr_cntl_poolp;
1482 	p_hxge_dma_common_t	*dma_rcr_cntl_p;
1483 	p_hxge_dma_pool_t	dma_mbox_cntl_poolp;
1484 	p_hxge_dma_common_t	*dma_mbox_cntl_p;
1485 	size_t			rx_buf_alloc_size;
1486 	size_t			rx_rbr_cntl_alloc_size;
1487 	size_t			rx_rcr_cntl_alloc_size;
1488 	size_t			rx_mbox_cntl_alloc_size;
1489 	uint32_t		*num_chunks;	/* per dma */
1490 	hxge_status_t		status = HXGE_OK;
1491 
1492 	uint32_t		hxge_port_rbr_size;
1493 	uint32_t		hxge_port_rbr_spare_size;
1494 	uint32_t		hxge_port_rcr_size;
1495 
1496 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_mem_pool"));
1497 
1498 	p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1499 	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1500 	st_rdc = p_cfgp->start_rdc;
1501 	ndmas = p_cfgp->max_rdcs;
1502 
1503 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1504 	    " hxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));
1505 
1506 	/*
1507 	 * Allocate memory for each receive DMA channel.
1508 	 */
1509 	dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1510 	    KM_SLEEP);
1511 	dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1512 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1513 
1514 	dma_rbr_cntl_poolp = (p_hxge_dma_pool_t)
1515 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1516 	dma_rbr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1517 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1518 	dma_rcr_cntl_poolp = (p_hxge_dma_pool_t)
1519 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1520 	dma_rcr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1521 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1522 	dma_mbox_cntl_poolp = (p_hxge_dma_pool_t)
1523 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1524 	dma_mbox_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1525 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1526 
1527 	num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
1528 	    KM_SLEEP);
1529 
1530 	/*
1531 	 * Assume that each DMA channel will be configured with default block
1532 	 * size. RBR block counts must be a multiple of the batch count (16).
1533 	 */
1534 	hxge_port_rbr_size = p_all_cfgp->rbr_size;
1535 	hxge_port_rcr_size = p_all_cfgp->rcr_size;
1536 
1537 	if (!hxge_port_rbr_size) {
1538 		hxge_port_rbr_size = HXGE_RBR_RBB_DEFAULT;
1539 	}
1540 
1541 	if (hxge_port_rbr_size % HXGE_RXDMA_POST_BATCH) {
1542 		hxge_port_rbr_size = (HXGE_RXDMA_POST_BATCH *
1543 		    (hxge_port_rbr_size / HXGE_RXDMA_POST_BATCH + 1));
1544 	}
1545 
1546 	p_all_cfgp->rbr_size = hxge_port_rbr_size;
1547 	hxge_port_rbr_spare_size = hxge_rbr_spare_size;
1548 
1549 	if (hxge_port_rbr_spare_size % HXGE_RXDMA_POST_BATCH) {
1550 		hxge_port_rbr_spare_size = (HXGE_RXDMA_POST_BATCH *
1551 		    (hxge_port_rbr_spare_size / HXGE_RXDMA_POST_BATCH + 1));
1552 	}
1553 
1554 	rx_buf_alloc_size = (hxgep->rx_default_block_size *
1555 	    (hxge_port_rbr_size + hxge_port_rbr_spare_size));
1556 
1557 	/*
1558 	 * Addresses of receive block ring, receive completion ring and the
1559 	 * mailbox must be all cache-aligned (64 bytes).
1560 	 */
1561 	rx_rbr_cntl_alloc_size = hxge_port_rbr_size + hxge_port_rbr_spare_size;
1562 	rx_rbr_cntl_alloc_size *= sizeof (rx_desc_t);
1563 	rx_rcr_cntl_alloc_size = sizeof (rcr_entry_t) * hxge_port_rcr_size;
1564 	rx_mbox_cntl_alloc_size = sizeof (rxdma_mailbox_t);
1565 
1566 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_alloc_rx_mem_pool: "
1567 	    "hxge_port_rbr_size = %d hxge_port_rbr_spare_size = %d "
1568 	    "hxge_port_rcr_size = %d rx_rbr_cntl_alloc_size = %d",
1569 	    hxge_port_rbr_size, hxge_port_rbr_spare_size,
1570 	    hxge_port_rcr_size, rx_rbr_cntl_alloc_size));
1571 
1572 	hxgep->hxge_port_rbr_size = hxge_port_rbr_size;
1573 	hxgep->hxge_port_rcr_size = hxge_port_rcr_size;
1574 
1575 	/*
1576 	 * Allocate memory for receive buffers and descriptor rings. Replace
1577 	 * allocation functions with interface functions provided by the
1578 	 * partition manager when it is available.
1579 	 */
1580 	/*
1581 	 * Allocate memory for the receive buffer blocks.
1582 	 */
1583 	for (i = 0; i < ndmas; i++) {
1584 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1585 		    " hxge_alloc_rx_mem_pool to alloc mem: "
1586 		    " dma %d dma_buf_p %llx &dma_buf_p %llx",
1587 		    i, dma_buf_p[i], &dma_buf_p[i]));
1588 
1589 		num_chunks[i] = 0;
1590 
1591 		status = hxge_alloc_rx_buf_dma(hxgep, st_rdc, &dma_buf_p[i],
1592 		    rx_buf_alloc_size, hxgep->rx_default_block_size,
1593 		    &num_chunks[i]);
1594 		if (status != HXGE_OK) {
1595 			break;
1596 		}
1597 
1598 		st_rdc++;
1599 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1600 		    " hxge_alloc_rx_mem_pool DONE  alloc mem: "
1601 		    "dma %d dma_buf_p %llx &dma_buf_p %llx", i,
1602 		    dma_buf_p[i], &dma_buf_p[i]));
1603 	}
1604 
1605 	if (i < ndmas) {
1606 		goto hxge_alloc_rx_mem_fail1;
1607 	}
1608 
1609 	/*
1610 	 * Allocate memory for descriptor rings and mailbox.
1611 	 */
1612 	st_rdc = p_cfgp->start_rdc;
1613 	for (j = 0; j < ndmas; j++) {
1614 		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1615 		    &dma_rbr_cntl_p[j], &hxge_rx_rbr_desc_dma_attr,
1616 		    rx_rbr_cntl_alloc_size)) != HXGE_OK) {
1617 			break;
1618 		}
1619 
1620 		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1621 		    &dma_rcr_cntl_p[j], &hxge_rx_rcr_desc_dma_attr,
1622 		    rx_rcr_cntl_alloc_size)) != HXGE_OK) {
1623 			break;
1624 		}
1625 
1626 		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1627 		    &dma_mbox_cntl_p[j], &hxge_rx_mbox_dma_attr,
1628 		    rx_mbox_cntl_alloc_size)) != HXGE_OK) {
1629 			break;
1630 		}
1631 		st_rdc++;
1632 	}
1633 
1634 	if (j < ndmas) {
1635 		goto hxge_alloc_rx_mem_fail2;
1636 	}
1637 
1638 	dma_poolp->ndmas = ndmas;
1639 	dma_poolp->num_chunks = num_chunks;
1640 	dma_poolp->buf_allocated = B_TRUE;
1641 	hxgep->rx_buf_pool_p = dma_poolp;
1642 	dma_poolp->dma_buf_pool_p = dma_buf_p;
1643 
1644 	dma_rbr_cntl_poolp->ndmas = ndmas;
1645 	dma_rbr_cntl_poolp->buf_allocated = B_TRUE;
1646 	hxgep->rx_rbr_cntl_pool_p = dma_rbr_cntl_poolp;
1647 	dma_rbr_cntl_poolp->dma_buf_pool_p = dma_rbr_cntl_p;
1648 
1649 	dma_rcr_cntl_poolp->ndmas = ndmas;
1650 	dma_rcr_cntl_poolp->buf_allocated = B_TRUE;
1651 	hxgep->rx_rcr_cntl_pool_p = dma_rcr_cntl_poolp;
1652 	dma_rcr_cntl_poolp->dma_buf_pool_p = dma_rcr_cntl_p;
1653 
1654 	dma_mbox_cntl_poolp->ndmas = ndmas;
1655 	dma_mbox_cntl_poolp->buf_allocated = B_TRUE;
1656 	hxgep->rx_mbox_cntl_pool_p = dma_mbox_cntl_poolp;
1657 	dma_mbox_cntl_poolp->dma_buf_pool_p = dma_mbox_cntl_p;
1658 
1659 	goto hxge_alloc_rx_mem_pool_exit;
1660 
1661 hxge_alloc_rx_mem_fail2:
1662 	/* Free control buffers */
1663 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1664 	    "==> hxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
1665 	for (; j >= 0; j--) {
1666 		hxge_free_rx_cntl_dma(hxgep,
1667 		    (p_hxge_dma_common_t)dma_rbr_cntl_p[j]);
1668 		hxge_free_rx_cntl_dma(hxgep,
1669 		    (p_hxge_dma_common_t)dma_rcr_cntl_p[j]);
1670 		hxge_free_rx_cntl_dma(hxgep,
1671 		    (p_hxge_dma_common_t)dma_mbox_cntl_p[j]);
1672 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1673 		    "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1674 	}
1675 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1676 	    "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1677 
1678 hxge_alloc_rx_mem_fail1:
1679 	/* Free data buffers */
1680 	i--;
1681 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1682 	    "==> hxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
1683 	for (; i >= 0; i--) {
1684 		hxge_free_rx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
1685 		    num_chunks[i]);
1686 	}
1687 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1688 	    "==> hxge_alloc_rx_mem_pool: data bufs freed (%d)", i));
1689 
1690 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1691 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1692 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1693 	KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
1694 	KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1695 	KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
1696 	KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1697 	KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
1698 	KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1699 
1700 hxge_alloc_rx_mem_pool_exit:
1701 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1702 	    "<== hxge_alloc_rx_mem_pool:status 0x%08x", status));
1703 
1704 	return (status);
1705 }
1706 
1707 static void
1708 hxge_free_rx_mem_pool(p_hxge_t hxgep)
1709 {
1710 	uint32_t		i, ndmas;
1711 	p_hxge_dma_pool_t	dma_poolp;
1712 	p_hxge_dma_common_t	*dma_buf_p;
1713 	p_hxge_dma_pool_t	dma_rbr_cntl_poolp;
1714 	p_hxge_dma_common_t	*dma_rbr_cntl_p;
1715 	p_hxge_dma_pool_t	dma_rcr_cntl_poolp;
1716 	p_hxge_dma_common_t	*dma_rcr_cntl_p;
1717 	p_hxge_dma_pool_t	dma_mbox_cntl_poolp;
1718 	p_hxge_dma_common_t	*dma_mbox_cntl_p;
1719 	uint32_t		*num_chunks;
1720 
1721 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_free_rx_mem_pool"));
1722 
1723 	dma_poolp = hxgep->rx_buf_pool_p;
1724 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
1725 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool "
1726 		    "(null rx buf pool or buf not allocated)"));
1727 		return;
1728 	}
1729 
1730 	dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p;
1731 	if (dma_rbr_cntl_poolp == NULL ||
1732 	    (!dma_rbr_cntl_poolp->buf_allocated)) {
1733 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1734 		    "<== hxge_free_rx_mem_pool "
1735 		    "(null rbr cntl buf pool or rbr cntl buf not allocated)"));
1736 		return;
1737 	}
1738 
1739 	dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p;
1740 	if (dma_rcr_cntl_poolp == NULL ||
1741 	    (!dma_rcr_cntl_poolp->buf_allocated)) {
1742 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1743 		    "<== hxge_free_rx_mem_pool "
1744 		    "(null rcr cntl buf pool or rcr cntl buf not allocated)"));
1745 		return;
1746 	}
1747 
1748 	dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p;
1749 	if (dma_mbox_cntl_poolp == NULL ||
1750 	    (!dma_mbox_cntl_poolp->buf_allocated)) {
1751 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1752 		    "<== hxge_free_rx_mem_pool "
1753 		    "(null mbox cntl buf pool or mbox cntl buf not allocated)"));
1754 		return;
1755 	}
1756 
1757 	dma_buf_p = dma_poolp->dma_buf_pool_p;
1758 	num_chunks = dma_poolp->num_chunks;
1759 
1760 	dma_rbr_cntl_p = dma_rbr_cntl_poolp->dma_buf_pool_p;
1761 	dma_rcr_cntl_p = dma_rcr_cntl_poolp->dma_buf_pool_p;
1762 	dma_mbox_cntl_p = dma_mbox_cntl_poolp->dma_buf_pool_p;
1763 	ndmas = dma_rbr_cntl_poolp->ndmas;
1764 
1765 	for (i = 0; i < ndmas; i++) {
1766 		hxge_free_rx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
1767 	}
1768 
1769 	for (i = 0; i < ndmas; i++) {
1770 		hxge_free_rx_cntl_dma(hxgep, dma_rbr_cntl_p[i]);
1771 		hxge_free_rx_cntl_dma(hxgep, dma_rcr_cntl_p[i]);
1772 		hxge_free_rx_cntl_dma(hxgep, dma_mbox_cntl_p[i]);
1773 	}
1774 
1775 	for (i = 0; i < ndmas; i++) {
1776 		KMEM_FREE(dma_buf_p[i],
1777 		    sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1778 		KMEM_FREE(dma_rbr_cntl_p[i], sizeof (hxge_dma_common_t));
1779 		KMEM_FREE(dma_rcr_cntl_p[i], sizeof (hxge_dma_common_t));
1780 		KMEM_FREE(dma_mbox_cntl_p[i], sizeof (hxge_dma_common_t));
1781 	}
1782 
1783 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1784 	KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1785 	KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
1786 	KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1787 	KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
1788 	KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1789 	KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
1790 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1791 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1792 
1793 	hxgep->rx_buf_pool_p = NULL;
1794 	hxgep->rx_rbr_cntl_pool_p = NULL;
1795 	hxgep->rx_rcr_cntl_pool_p = NULL;
1796 	hxgep->rx_mbox_cntl_pool_p = NULL;
1797 
1798 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool"));
1799 }
1800 
1801 static hxge_status_t
1802 hxge_alloc_rx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
1803     p_hxge_dma_common_t *dmap,
1804     size_t alloc_size, size_t block_size, uint32_t *num_chunks)
1805 {
1806 	p_hxge_dma_common_t	rx_dmap;
1807 	hxge_status_t		status = HXGE_OK;
1808 	size_t			total_alloc_size;
1809 	size_t			allocated = 0;
1810 	int			i, size_index, array_size;
1811 
1812 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_buf_dma"));
1813 
1814 	rx_dmap = (p_hxge_dma_common_t)
1815 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
1816 
1817 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1818 	    " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
1819 	    dma_channel, alloc_size, block_size, dmap));
1820 
1821 	total_alloc_size = alloc_size;
1822 
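	/*
	 * Select the smallest predefined chunk size (alloc_sizes[]) that
	 * covers the request; if none is large enough, clamp to the
	 * largest and let the loop below step down on failure.
	 */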
1823 	i = 0;
1824 	size_index = 0;
1825 	array_size = sizeof (alloc_sizes) / sizeof (size_t);
1826 	while ((size_index < array_size) &&
1827 	    (alloc_sizes[size_index] < alloc_size))
1828 		size_index++;
1829 	if (size_index >= array_size) {
1830 		size_index = array_size - 1;
1831 	}
1832 
1833 	while ((allocated < total_alloc_size) &&
1834 	    (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
1835 		rx_dmap[i].dma_chunk_index = i;
1836 		rx_dmap[i].block_size = block_size;
1837 		rx_dmap[i].alength = alloc_sizes[size_index];
1838 		rx_dmap[i].orig_alength = rx_dmap[i].alength;
1839 		rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
1840 		rx_dmap[i].dma_channel = dma_channel;
1841 		rx_dmap[i].contig_alloc_type = B_FALSE;
1842 
1843 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1844 		    "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
1845 		    "i %d nblocks %d alength %d",
1846 		    dma_channel, i, &rx_dmap[i], block_size,
1847 		    i, rx_dmap[i].nblocks, rx_dmap[i].alength));
1848 		status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1849 		    &hxge_rx_dma_attr, rx_dmap[i].alength,
1850 		    &hxge_dev_buf_dma_acc_attr,
1851 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1852 		    (p_hxge_dma_common_t)(&rx_dmap[i]));
1853 		if (status != HXGE_OK) {
1854 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1855 			    " hxge_alloc_rx_buf_dma: Alloc Failed: "
1856 			    " for size: %d", alloc_sizes[size_index]));
1857 			size_index--;
1858 		} else {
1859 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1860 			    " alloc_rx_buf_dma allocated rdc %d "
1861 			    "chunk %d size %x dvma %x bufp %llx ",
1862 			    dma_channel, i, rx_dmap[i].alength,
1863 			    rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
1864 			i++;
1865 			allocated += alloc_sizes[size_index];
1866 		}
1867 	}
1868 
1869 	if (allocated < total_alloc_size) {
1870 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1871 		    " hxge_alloc_rx_buf_dma failed due to"
1872 		    " allocated(%d) < required(%d)",
1873 		    allocated, total_alloc_size));
1874 		goto hxge_alloc_rx_mem_fail1;
1875 	}
1876 
1877 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1878 	    " alloc_rx_buf_dma rdc %d allocated %d chunks", dma_channel, i));
1879 
1880 	*num_chunks = i;
1881 	*dmap = rx_dmap;
1882 
1883 	goto hxge_alloc_rx_mem_exit;
1884 
1885 hxge_alloc_rx_mem_fail1:
1886 	KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1887 
1888 hxge_alloc_rx_mem_exit:
1889 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1890 	    "<== hxge_alloc_rx_buf_dma status 0x%08x", status));
1891 
1892 	return (status);
1893 }
1894 
1895 /*ARGSUSED*/
1896 static void
1897 hxge_free_rx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
1898     uint32_t num_chunks)
1899 {
1900 	int i;
1901 
1902 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1903 	    "==> hxge_free_rx_buf_dma: # of chunks %d", num_chunks));
1904 
1905 	for (i = 0; i < num_chunks; i++) {
1906 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1907 		    "==> hxge_free_rx_buf_dma: chunk %d dmap 0x%llx", i, dmap));
1908 		hxge_dma_mem_free(dmap++);
1909 	}
1910 
1911 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_buf_dma"));
1912 }
1913 
1914 /*ARGSUSED*/
1915 static hxge_status_t
1916 hxge_alloc_rx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
1917     p_hxge_dma_common_t *dmap, struct ddi_dma_attr *attr, size_t size)
1918 {
1919 	p_hxge_dma_common_t	rx_dmap;
1920 	hxge_status_t		status = HXGE_OK;
1921 
1922 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_cntl_dma"));
1923 
1924 	rx_dmap = (p_hxge_dma_common_t)
1925 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t), KM_SLEEP);
1926 
1927 	rx_dmap->contig_alloc_type = B_FALSE;
1928 
1929 	status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1930 	    attr, size, &hxge_dev_desc_dma_acc_attr,
1931 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, rx_dmap);
1932 	if (status != HXGE_OK) {
1933 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1934 		    " hxge_alloc_rx_cntl_dma: Alloc Failed: "
1935 		    " for size: %d", size));
1936 		goto hxge_alloc_rx_cntl_dma_fail1;
1937 	}
1938 
1939 	*dmap = rx_dmap;
1940 
1941 	goto hxge_alloc_rx_cntl_dma_exit;
1942 
1943 hxge_alloc_rx_cntl_dma_fail1:
1944 	KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t));
1945 
1946 hxge_alloc_rx_cntl_dma_exit:
1947 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1948 	    "<== hxge_alloc_rx_cntl_dma status 0x%08x", status));
1949 
1950 	return (status);
1951 }
1952 
1953 /*ARGSUSED*/
1954 static void
1955 hxge_free_rx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
1956 {
1957 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_rx_cntl_dma"));
1958 
1959 	hxge_dma_mem_free(dmap);
1960 
1961 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_rx_cntl_dma"));
1962 }
1963 
1964 static hxge_status_t
1965 hxge_alloc_tx_mem_pool(p_hxge_t hxgep)
1966 {
1967 	hxge_status_t		status = HXGE_OK;
1968 	int			i, j;
1969 	uint32_t		ndmas, st_tdc;
1970 	p_hxge_dma_pt_cfg_t	p_all_cfgp;
1971 	p_hxge_hw_pt_cfg_t	p_cfgp;
1972 	p_hxge_dma_pool_t	dma_poolp;
1973 	p_hxge_dma_common_t	*dma_buf_p;
1974 	p_hxge_dma_pool_t	dma_cntl_poolp;
1975 	p_hxge_dma_common_t	*dma_cntl_p;
1976 	size_t			tx_buf_alloc_size;
1977 	size_t			tx_cntl_alloc_size;
1978 	uint32_t		*num_chunks;	/* per dma */
1979 
1980 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool"));
1981 
1982 	p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1983 	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1984 	st_tdc = p_cfgp->start_tdc;
1985 	ndmas = p_cfgp->max_tdcs;
1986 
1987 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool: "
1988 	    "p_cfgp 0x%016llx start_tdc %d ndmas %d hxgep->max_tdcs %d",
1989 	    p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, hxgep->max_tdcs));
1990 	/*
1991 	 * Allocate memory for each transmit DMA channel.
1992 	 */
1993 	dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1994 	    KM_SLEEP);
1995 	dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1996 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1997 
1998 	dma_cntl_poolp = (p_hxge_dma_pool_t)
1999 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
2000 	dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
2001 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
2002 
2003 	hxgep->hxge_port_tx_ring_size = hxge_tx_ring_size;
2004 
2005 	/*
2006 	 * Assume that each DMA channel will be configured with default
2007 	 * Assume that each DMA channel will be configured with the default
2008 	 * transmit buffer size for copying transmit data. (For packet payloads
2009 	 */
2010 	tx_buf_alloc_size = (hxge_bcopy_thresh * hxge_tx_ring_size);
2011 
2012 	/*
2013 	 * Addresses of the transmit descriptor ring and the mailbox must all be
2014 	 * cache-aligned (64 bytes).
2015 	 */
2016 	tx_cntl_alloc_size = hxge_tx_ring_size;
2017 	tx_cntl_alloc_size *= (sizeof (tx_desc_t));
2018 	tx_cntl_alloc_size += sizeof (txdma_mailbox_t);
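	/*
	 * Worked example (sizes illustrative only): with a 1024-entry ring
	 * and an assumed 8-byte tx_desc_t, this reserves 8 KB of descriptors
	 * plus one txdma_mailbox_t in a single 64-byte-aligned allocation.
	 */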
2019 
2020 	num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
2021 	    KM_SLEEP);
2022 
2023 	/*
2024 	 * Allocate memory for transmit buffers and descriptor rings. Replace
2025 	 * allocation functions with interface functions provided by the
2026 	 * partition manager when it is available.
2027 	 *
2028 	 * Allocate memory for the transmit buffer pool.
2029 	 */
2030 	for (i = 0; i < ndmas; i++) {
2031 		num_chunks[i] = 0;
2032 		status = hxge_alloc_tx_buf_dma(hxgep, st_tdc, &dma_buf_p[i],
2033 		    tx_buf_alloc_size, hxge_bcopy_thresh, &num_chunks[i]);
2034 		if (status != HXGE_OK) {
2035 			break;
2036 		}
2037 		st_tdc++;
2038 	}
2039 
2040 	if (i < ndmas) {
2041 		goto hxge_alloc_tx_mem_pool_fail1;
2042 	}
2043 
2044 	st_tdc = p_cfgp->start_tdc;
2045 
2046 	/*
2047 	 * Allocate memory for descriptor rings and mailbox.
2048 	 */
2049 	for (j = 0; j < ndmas; j++) {
2050 		status = hxge_alloc_tx_cntl_dma(hxgep, st_tdc, &dma_cntl_p[j],
2051 		    tx_cntl_alloc_size);
2052 		if (status != HXGE_OK) {
2053 			break;
2054 		}
2055 		st_tdc++;
2056 	}
2057 
2058 	if (j < ndmas) {
2059 		goto hxge_alloc_tx_mem_pool_fail2;
2060 	}
2061 
2062 	dma_poolp->ndmas = ndmas;
2063 	dma_poolp->num_chunks = num_chunks;
2064 	dma_poolp->buf_allocated = B_TRUE;
2065 	dma_poolp->dma_buf_pool_p = dma_buf_p;
2066 	hxgep->tx_buf_pool_p = dma_poolp;
2067 
2068 	dma_cntl_poolp->ndmas = ndmas;
2069 	dma_cntl_poolp->buf_allocated = B_TRUE;
2070 	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2071 	hxgep->tx_cntl_pool_p = dma_cntl_poolp;
2072 
2073 	HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2074 	    "==> hxge_alloc_tx_mem_pool: start_tdc %d "
2075 	    "ndmas %d poolp->ndmas %d", st_tdc, ndmas, dma_poolp->ndmas));
2076 
2077 	goto hxge_alloc_tx_mem_pool_exit;
2078 
2079 hxge_alloc_tx_mem_pool_fail2:
2080 	/* Free control buffers */
2081 	j--;
2082 	for (; j >= 0; j--) {
2083 		hxge_free_tx_cntl_dma(hxgep,
2084 		    (p_hxge_dma_common_t)dma_cntl_p[j]);
2085 	}
2086 
2087 hxge_alloc_tx_mem_pool_fail1:
2088 	/* Free data buffers */
2089 	i--;
2090 	for (; i >= 0; i--) {
2091 		hxge_free_tx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
2092 		    num_chunks[i]);
2093 	}
2094 
2095 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2096 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2097 	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2098 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2099 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2100 
2101 hxge_alloc_tx_mem_pool_exit:
2102 	HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2103 	    "<== hxge_alloc_tx_mem_pool:status 0x%08x", status));
2104 
2105 	return (status);
2106 }
2107 
2108 static hxge_status_t
2109 hxge_alloc_tx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
2110     p_hxge_dma_common_t *dmap, size_t alloc_size,
2111     size_t block_size, uint32_t *num_chunks)
2112 {
2113 	p_hxge_dma_common_t	tx_dmap;
2114 	hxge_status_t		status = HXGE_OK;
2115 	size_t			total_alloc_size;
2116 	size_t			allocated = 0;
2117 	int			i, size_index, array_size;
2118 
2119 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_buf_dma"));
2120 
2121 	tx_dmap = (p_hxge_dma_common_t)
2122 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
2123 
2124 	total_alloc_size = alloc_size;
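	/*
	 * As in hxge_alloc_rx_buf_dma(): select the smallest predefined
	 * chunk size that covers the request, clamping to the largest.
	 */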
2125 	i = 0;
2126 	size_index = 0;
2127 	array_size = sizeof (alloc_sizes) / sizeof (size_t);
2128 	while ((size_index < array_size) &&
2129 	    (alloc_sizes[size_index] < alloc_size))
2130 		size_index++;
2131 	if (size_index >= array_size) {
2132 		size_index = array_size - 1;
2133 	}
2134 
2135 	while ((allocated < total_alloc_size) &&
2136 	    (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
2137 		tx_dmap[i].dma_chunk_index = i;
2138 		tx_dmap[i].block_size = block_size;
2139 		tx_dmap[i].alength = alloc_sizes[size_index];
2140 		tx_dmap[i].orig_alength = tx_dmap[i].alength;
2141 		tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2142 		tx_dmap[i].dma_channel = dma_channel;
2143 		tx_dmap[i].contig_alloc_type = B_FALSE;
2144 
2145 		status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2146 		    &hxge_tx_dma_attr, tx_dmap[i].alength,
2147 		    &hxge_dev_buf_dma_acc_attr,
2148 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
2149 		    (p_hxge_dma_common_t)(&tx_dmap[i]));
2150 		if (status != HXGE_OK) {
2151 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2152 			    " hxge_alloc_tx_buf_dma: Alloc Failed: "
2153 			    " for size: %d", alloc_sizes[size_index]));
2154 			size_index--;
2155 		} else {
2156 			i++;
2157 			allocated += alloc_sizes[size_index];
2158 		}
2159 	}
2160 
2161 	if (allocated < total_alloc_size) {
2162 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2163 		    " hxge_alloc_tx_buf_dma: failed due to"
2164 		    " allocated(%d) < required(%d)",
2165 		    allocated, total_alloc_size));
2166 		goto hxge_alloc_tx_mem_fail1;
2167 	}
2168 
2169 	*num_chunks = i;
2170 	*dmap = tx_dmap;
2171 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2172 	    "==> hxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
2173 	    *dmap, i));
2174 	goto hxge_alloc_tx_mem_exit;
2175 
2176 hxge_alloc_tx_mem_fail1:
2177 	KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2178 
2179 hxge_alloc_tx_mem_exit:
2180 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2181 	    "<== hxge_alloc_tx_buf_dma status 0x%08x", status));
2182 
2183 	return (status);
2184 }
2185 
2186 /*ARGSUSED*/
2187 static void
2188 hxge_free_tx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
2189     uint32_t num_chunks)
2190 {
2191 	int i;
2192 
2193 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_tx_buf_dma"));
2194 
2195 	for (i = 0; i < num_chunks; i++) {
2196 		hxge_dma_mem_free(dmap++);
2197 	}
2198 
2199 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_tx_buf_dma"));
2200 }
2201 
2202 /*ARGSUSED*/
2203 static hxge_status_t
2204 hxge_alloc_tx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
2205     p_hxge_dma_common_t *dmap, size_t size)
2206 {
2207 	p_hxge_dma_common_t	tx_dmap;
2208 	hxge_status_t		status = HXGE_OK;
2209 
2210 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_cntl_dma"));
2211 
2212 	tx_dmap = (p_hxge_dma_common_t)KMEM_ZALLOC(sizeof (hxge_dma_common_t),
2213 	    KM_SLEEP);
2214 
2215 	tx_dmap->contig_alloc_type = B_FALSE;
2216 
2217 	status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2218 	    &hxge_tx_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr,
2219 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, tx_dmap);
2220 	if (status != HXGE_OK) {
2221 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2222 		    " hxge_alloc_tx_cntl_dma: Alloc Failed: "
2223 		    " for size: %d", size));
2224 		goto hxge_alloc_tx_cntl_dma_fail1;
2225 	}
2226 
2227 	*dmap = tx_dmap;
2228 
2229 	goto hxge_alloc_tx_cntl_dma_exit;
2230 
2231 hxge_alloc_tx_cntl_dma_fail1:
2232 	KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t));
2233 
2234 hxge_alloc_tx_cntl_dma_exit:
2235 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2236 	    "<== hxge_alloc_tx_cntl_dma status 0x%08x", status));
2237 
2238 	return (status);
2239 }
2240 
2241 /*ARGSUSED*/
2242 static void
2243 hxge_free_tx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
2244 {
2245 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_tx_cntl_dma"));
2246 
2247 	hxge_dma_mem_free(dmap);
2248 
2249 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_tx_cntl_dma"));
2250 }
2251 
2252 static void
2253 hxge_free_tx_mem_pool(p_hxge_t hxgep)
2254 {
2255 	uint32_t		i, ndmas;
2256 	p_hxge_dma_pool_t	dma_poolp;
2257 	p_hxge_dma_common_t	*dma_buf_p;
2258 	p_hxge_dma_pool_t	dma_cntl_poolp;
2259 	p_hxge_dma_common_t	*dma_cntl_p;
2260 	uint32_t		*num_chunks;
2261 
2262 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_free_tx_mem_pool"));
2263 
2264 	dma_poolp = hxgep->tx_buf_pool_p;
2265 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
2266 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2267 		    "<== hxge_free_tx_mem_pool "
2268 		    "(null tx buf pool or buf not allocated)"));
2269 		return;
2270 	}
2271 
2272 	dma_cntl_poolp = hxgep->tx_cntl_pool_p;
2273 	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
2274 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2275 		    "<== hxge_free_tx_mem_pool "
2276 		    "(null tx cntl buf pool or cntl buf not allocated)"));
2277 		return;
2278 	}
2279 
2280 	dma_buf_p = dma_poolp->dma_buf_pool_p;
2281 	num_chunks = dma_poolp->num_chunks;
2282 
2283 	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
2284 	ndmas = dma_cntl_poolp->ndmas;
2285 
2286 	for (i = 0; i < ndmas; i++) {
2287 		hxge_free_tx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
2288 	}
2289 
2290 	for (i = 0; i < ndmas; i++) {
2291 		hxge_free_tx_cntl_dma(hxgep, dma_cntl_p[i]);
2292 	}
2293 
2294 	for (i = 0; i < ndmas; i++) {
2295 		KMEM_FREE(dma_buf_p[i],
2296 		    sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2297 		KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t));
2298 	}
2299 
2300 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2301 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2302 	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2303 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2304 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2305 
2306 	hxgep->tx_buf_pool_p = NULL;
2307 	hxgep->tx_cntl_pool_p = NULL;
2308 
2309 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_free_tx_mem_pool"));
2310 }
2311 
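/*
 * hxge_dma_mem_alloc() -- allocate, map, and bind a DMA buffer.
 *
 * Standard three-step DDI sequence: allocate a DMA handle
 * (ddi_dma_alloc_handle), allocate memory for it (ddi_dma_mem_alloc),
 * then bind the memory to the handle (ddi_dma_addr_bind_handle).
 * Each failure path unwinds whatever steps already succeeded.
 */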
2312 /*ARGSUSED*/
2313 static hxge_status_t
2314 hxge_dma_mem_alloc(p_hxge_t hxgep, dma_method_t method,
2315     struct ddi_dma_attr *dma_attrp,
2316     size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
2317     p_hxge_dma_common_t dma_p)
2318 {
2319 	caddr_t		kaddrp;
2320 	int		ddi_status = DDI_SUCCESS;
2321 
2322 	dma_p->dma_handle = NULL;
2323 	dma_p->acc_handle = NULL;
2324 	dma_p->kaddrp = NULL;
2325 
2326 	ddi_status = ddi_dma_alloc_handle(hxgep->dip, dma_attrp,
2327 	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
2328 	if (ddi_status != DDI_SUCCESS) {
2329 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2330 		    "hxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
2331 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2332 	}
2333 
2334 	ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, acc_attr_p,
2335 	    xfer_flags, DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
2336 	    &dma_p->acc_handle);
2337 	if (ddi_status != DDI_SUCCESS) {
2338 		/* The caller will decide whether it is fatal */
2339 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2340 		    "hxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
2341 		ddi_dma_free_handle(&dma_p->dma_handle);
2342 		dma_p->dma_handle = NULL;
2343 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2344 	}
2345 
2346 	if (dma_p->alength < length) {
2347 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2348 		    "hxge_dma_mem_alloc:ddi_dma_mem_alloc < length."));
2349 		ddi_dma_mem_free(&dma_p->acc_handle);
2350 		ddi_dma_free_handle(&dma_p->dma_handle);
2351 		dma_p->acc_handle = NULL;
2352 		dma_p->dma_handle = NULL;
2353 		return (HXGE_ERROR);
2354 	}
2355 
2356 	ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2357 	    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2358 	    &dma_p->dma_cookie, &dma_p->ncookies);
2359 	if (ddi_status != DDI_DMA_MAPPED) {
2360 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2361 		    "hxge_dma_mem_alloc:ddi_dma_addr_bind failed "
2362 		    "(status 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2363 		if (dma_p->acc_handle) {
2364 			ddi_dma_mem_free(&dma_p->acc_handle);
2365 			dma_p->acc_handle = NULL;
2366 		}
2367 		ddi_dma_free_handle(&dma_p->dma_handle);
2368 		dma_p->dma_handle = NULL;
2369 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2370 	}
2371 
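	/*
	 * The driver assumes one physically contiguous buffer per chunk,
	 * so a bind that yields more than one cookie is unusable and is
	 * torn down here.
	 */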
2372 	if (dma_p->ncookies != 1) {
2373 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2374 		    "hxge_dma_mem_alloc:ddi_dma_addr_bind > 1 cookie "
2375 		    "(status 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2376 		if (dma_p->acc_handle) {
2377 			ddi_dma_mem_free(&dma_p->acc_handle);
2378 			dma_p->acc_handle = NULL;
2379 		}
2380 		(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2381 		ddi_dma_free_handle(&dma_p->dma_handle);
2382 		dma_p->dma_handle = NULL;
2383 		return (HXGE_ERROR);
2384 	}
2385 
2386 	dma_p->kaddrp = kaddrp;
2387 #if defined(__i386)
2388 	dma_p->ioaddr_pp =
2389 	    (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
2390 #else
2391 	dma_p->ioaddr_pp = (unsigned char *) dma_p->dma_cookie.dmac_laddress;
2392 #endif
2393 
2394 	HPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
2395 
2396 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_dma_mem_alloc: "
2397 	    "dma buffer allocated: dma_p $%p "
2398 	    "return dmac_laddress from cookie $%p dmac_size %d "
2399 	    "dma_p->ioaddr_p $%p "
2400 	    "dma_p->orig_ioaddr_p $%p "
2401 	    "orig_vatopa $%p "
2402 	    "alength %d (0x%x) "
2403 	    "kaddrp $%p "
2404 	    "length %d (0x%x)",
2405 	    dma_p,
2406 	    dma_p->dma_cookie.dmac_laddress,
2407 	    dma_p->dma_cookie.dmac_size,
2408 	    dma_p->ioaddr_pp,
2409 	    dma_p->orig_ioaddr_pp,
2410 	    dma_p->orig_vatopa,
2411 	    dma_p->alength, dma_p->alength,
2412 	    kaddrp,
2413 	    length, length));
2414 
2415 	return (HXGE_OK);
2416 }
2417 
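/*
 * hxge_dma_mem_free() -- undo hxge_dma_mem_alloc(): unbind the DMA
 * handle, free it, and release the mapped memory.
 */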
2418 static void
2419 hxge_dma_mem_free(p_hxge_dma_common_t dma_p)
2420 {
2421 	if (dma_p == NULL)
2422 		return;
2423 
2424 	if (dma_p->dma_handle != NULL) {
2425 		if (dma_p->ncookies) {
2426 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2427 			dma_p->ncookies = 0;
2428 		}
2429 		ddi_dma_free_handle(&dma_p->dma_handle);
2430 		dma_p->dma_handle = NULL;
2431 	}
2432 
2433 	if (dma_p->acc_handle != NULL) {
2434 		ddi_dma_mem_free(&dma_p->acc_handle);
2435 		dma_p->acc_handle = NULL;
2436 		HPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
2437 	}
2438 
2439 	dma_p->kaddrp = NULL;
2440 	dma_p->alength = 0;
2441 }
2442 
2443 /*
2444  *	hxge_m_start() -- start transmitting and receiving.
2445  *
2446  *	This function is called by the MAC layer when the first
2447  *	stream is opened to prepare the hardware for sending
2448  *	and receiving packets.
2449  */
2450 static int
2451 hxge_m_start(void *arg)
2452 {
2453 	p_hxge_t hxgep = (p_hxge_t)arg;
2454 
2455 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_start"));
2456 
2457 	MUTEX_ENTER(hxgep->genlock);
2458 
2459 	if (hxge_init(hxgep) != HXGE_OK) {
2460 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2461 		    "<== hxge_m_start: initialization failed"));
2462 		MUTEX_EXIT(hxgep->genlock);
2463 		return (EIO);
2464 	}
2465 
2466 	if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) {
2467 		/*
2468 		 * Start timer to check the system error and tx hangs
2469 		 */
2470 		hxgep->hxge_timerid = hxge_start_timer(hxgep,
2471 		    hxge_check_hw_state, HXGE_CHECK_TIMER);
2472 
2473 		hxgep->hxge_mac_state = HXGE_MAC_STARTED;
2474 	}
2475 
2476 	MUTEX_EXIT(hxgep->genlock);
2477 
2478 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_start"));
2479 
2480 	return (0);
2481 }
2482 
2483 /*
2484  * hxge_m_stop(): stop transmitting and receiving.
2485  */
2486 static void
2487 hxge_m_stop(void *arg)
2488 {
2489 	p_hxge_t hxgep = (p_hxge_t)arg;
2490 
2491 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_stop"));
2492 
2493 	if (hxgep->hxge_timerid) {
2494 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
2495 		hxgep->hxge_timerid = 0;
2496 	}
2497 
2498 	MUTEX_ENTER(hxgep->genlock);
2499 
2500 	hxge_uninit(hxgep);
2501 
2502 	hxgep->hxge_mac_state = HXGE_MAC_STOPPED;
2503 
2504 	MUTEX_EXIT(hxgep->genlock);
2505 
2506 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_stop"));
2507 }
2508 
2509 static int
2510 hxge_m_unicst(void *arg, const uint8_t *macaddr)
2511 {
2512 	p_hxge_t		hxgep = (p_hxge_t)arg;
2513 	struct ether_addr	addrp;
2514 	hxge_status_t		status;
2515 
2516 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_unicst"));
2517 
2518 	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
2519 
2520 	status = hxge_set_mac_addr(hxgep, &addrp);
2521 	if (status != HXGE_OK) {
2522 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2523 		    "<== hxge_m_unicst: set unicast failed"));
2524 		return (EINVAL);
2525 	}
2526 
2527 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_unicst"));
2528 
2529 	return (0);
2530 }
2531 
2532 static int
2533 hxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
2534 {
2535 	p_hxge_t		hxgep = (p_hxge_t)arg;
2536 	struct ether_addr	addrp;
2537 
2538 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_multicst: add %d", add));
2539 
2540 	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
2541 
2542 	if (add) {
2543 		if (hxge_add_mcast_addr(hxgep, &addrp)) {
2544 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2545 			    "<== hxge_m_multicst: add multicast failed"));
2546 			return (EINVAL);
2547 		}
2548 	} else {
2549 		if (hxge_del_mcast_addr(hxgep, &addrp)) {
2550 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2551 			    "<== hxge_m_multicst: del multicast failed"));
2552 			return (EINVAL);
2553 		}
2554 	}
2555 
2556 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_multicst"));
2557 
2558 	return (0);
2559 }
2560 
2561 static int
2562 hxge_m_promisc(void *arg, boolean_t on)
2563 {
2564 	p_hxge_t hxgep = (p_hxge_t)arg;
2565 
2566 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_promisc: on %d", on));
2567 
2568 	if (hxge_set_promisc(hxgep, on)) {
2569 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2570 		    "<== hxge_m_promisc: set promisc failed"));
2571 		return (EINVAL);
2572 	}
2573 
2574 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_promisc: on %d", on));
2575 
2576 	return (0);
2577 }
2578 
2579 static void
2580 hxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2581 {
2582 	p_hxge_t	hxgep = (p_hxge_t)arg;
2583 	struct iocblk	*iocp;
2584 	boolean_t	need_privilege;
2585 	int		err;
2586 	int		cmd;
2587 
2588 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl"));
2589 
2590 	iocp = (struct iocblk *)mp->b_rptr;
2591 	iocp->ioc_error = 0;
2592 	need_privilege = B_TRUE;
2593 	cmd = iocp->ioc_cmd;
2594 
2595 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl: cmd 0x%08x", cmd));
2596 	switch (cmd) {
2597 	default:
2598 		miocnak(wq, mp, 0, EINVAL);
2599 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl: invalid"));
2600 		return;
2601 
2602 	case LB_GET_INFO_SIZE:
2603 	case LB_GET_INFO:
2604 	case LB_GET_MODE:
2605 		need_privilege = B_FALSE;
2606 		break;
2607 
2608 	case LB_SET_MODE:
2609 		break;
2610 
2611 	case ND_GET:
2612 		need_privilege = B_FALSE;
2613 		break;
2614 	case ND_SET:
2615 		break;
2616 
2617 	case HXGE_GET64:
2618 	case HXGE_PUT64:
2619 	case HXGE_GET_TX_RING_SZ:
2620 	case HXGE_GET_TX_DESC:
2621 	case HXGE_TX_SIDE_RESET:
2622 	case HXGE_RX_SIDE_RESET:
2623 	case HXGE_GLOBAL_RESET:
2624 	case HXGE_RESET_MAC:
2625 	case HXGE_PUT_TCAM:
2626 	case HXGE_GET_TCAM:
2627 	case HXGE_RTRACE:
2628 
2629 		need_privilege = B_FALSE;
2630 		break;
2631 	}
2632 
2633 	if (need_privilege) {
2634 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
2635 		if (err != 0) {
2636 			miocnak(wq, mp, 0, err);
2637 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2638 			    "<== hxge_m_ioctl: no priv"));
2639 			return;
2640 		}
2641 	}
2642 
2643 	switch (cmd) {
2644 	case ND_GET:
2645 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_GET command"));
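		/* FALLTHROUGH */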
2646 	case ND_SET:
2647 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_SET command"));
2648 		hxge_param_ioctl(hxgep, wq, mp, iocp);
2649 		break;
2650 
2651 	case LB_GET_MODE:
2652 	case LB_SET_MODE:
2653 	case LB_GET_INFO_SIZE:
2654 	case LB_GET_INFO:
2655 		hxge_loopback_ioctl(hxgep, wq, mp, iocp);
2656 		break;
2657 
2658 	case HXGE_PUT_TCAM:
2659 	case HXGE_GET_TCAM:
2660 	case HXGE_GET64:
2661 	case HXGE_PUT64:
2662 	case HXGE_GET_TX_RING_SZ:
2663 	case HXGE_GET_TX_DESC:
2664 	case HXGE_TX_SIDE_RESET:
2665 	case HXGE_RX_SIDE_RESET:
2666 	case HXGE_GLOBAL_RESET:
2667 	case HXGE_RESET_MAC:
2668 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL,
2669 		    "==> hxge_m_ioctl: cmd 0x%x", cmd));
2670 		hxge_hw_ioctl(hxgep, wq, mp, iocp);
2671 		break;
2672 	}
2673 
2674 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl"));
2675 }
2676 
2677 extern void hxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);
2678 
2679 static void
2680 hxge_m_resources(void *arg)
2681 {
2682 	p_hxge_t hxgep = arg;
2683 	mac_rx_fifo_t mrf;
2684 	p_rx_rcr_rings_t rcr_rings;
2685 	p_rx_rcr_ring_t *rcr_p;
2686 	p_rx_rcr_ring_t rcrp;
2687 	uint32_t i, ndmas;
2688 	int status;
2689 
2690 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_m_resources"));
2691 
2692 	MUTEX_ENTER(hxgep->genlock);
2693 
2694 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2695 		status = hxge_init(hxgep);
2696 		if (status != HXGE_OK) {
2697 			HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_m_resources: "
2698 			    "hxge_init failed"));
2699 			MUTEX_EXIT(hxgep->genlock);
2700 			return;
2701 		}
2702 	}
2703 
2704 	mrf.mrf_type = MAC_RX_FIFO;
2705 	mrf.mrf_blank = hxge_rx_hw_blank;
2706 	mrf.mrf_arg = (void *)hxgep;
2707 
2708 	mrf.mrf_normal_blank_time = RXDMA_RCR_TO_DEFAULT;
2709 	mrf.mrf_normal_pkt_count = RXDMA_RCR_PTHRES_DEFAULT;
2710 
2711 	rcr_rings = hxgep->rx_rcr_rings;
2712 	rcr_p = rcr_rings->rcr_rings;
2713 	ndmas = rcr_rings->ndmas;
2714 
2715 	/*
2716 	 * Export our receive resources to the MAC layer.
2717 	 */
2718 	for (i = 0; i < ndmas; i++) {
2719 		rcrp = (void *)(p_rx_rcr_ring_t)rcr_p[i];
2720 		rcrp->rcr_mac_handle =
2721 		    mac_resource_add(hxgep->mach, (mac_resource_t *)&mrf);
2722 
2723 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
2724 		    "==> hxge_m_resources: vdma %d dma %d "
2725 		    "rcrptr 0x%016llx mac_handle 0x%016llx",
2726 		    i, rcrp->rdc, rcr_p[i], rcrp->rcr_mac_handle));
2727 	}
2728 
2729 	MUTEX_EXIT(hxgep->genlock);
2730 
2731 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_m_resources"));
2732 }
2733 
2734 /*
2735  * Set an alternate MAC address
2736  */
2737 static int
2738 hxge_altmac_set(p_hxge_t hxgep, uint8_t *maddr, mac_addr_slot_t slot)
2739 {
2740 	uint64_t	address;
2741 	uint64_t	tmp;
2742 	hpi_status_t	status;
2743 	uint8_t		addrn;
2744 	int		i;
2745 
2746 	/*
2747 	 * Convert a byte array to a 48-bit value.
2748 	 * Check endianness if in doubt.
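	 * For example, 00:14:4f:6e:a8:01 assembles to the 48-bit value
	 * 0x00144f6ea801 (example address, for illustration only).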
2749 	 */
2750 	address = 0;
2751 	for (i = 0; i < ETHERADDRL; i++) {
2752 		tmp = maddr[i];
2753 		address <<= 8;
2754 		address |= tmp;
2755 	}
2756 
2757 	addrn = (uint8_t)slot;
2758 	status = hpi_pfc_set_mac_address(hxgep->hpi_handle, addrn, address);
2759 	if (status != HPI_SUCCESS)
2760 		return (EIO);
2761 
2762 	return (0);
2763 }
2764 
2765 static void
2766 hxge_mmac_kstat_update(p_hxge_t hxgep, mac_addr_slot_t slot)
2767 {
2768 	p_hxge_mmac_stats_t	mmac_stats;
2769 	int			i;
2770 	hxge_mmac_t		*mmac_info;
2771 
2772 	mmac_info = &hxgep->hxge_mmac_info;
2773 	mmac_stats = &hxgep->statsp->mmac_stats;
2774 	mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
2775 	mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;
2776 
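	/*
	 * Note: the pool address is copied into the kstat structure in
	 * reversed octet order.
	 */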
2777 	for (i = 0; i < ETHERADDRL; i++) {
2778 		mmac_stats->mmac_avail_pool[slot].ether_addr_octet[i] =
2779 		    mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i];
2780 	}
2781 }
2782 
2783 /*
2784  * Find an unused address slot, set the address value to the one specified,
2785  * and enable the port to start filtering on the new MAC address.
2786  * Returns: 0 on success.
2787  */
2788 int
2789 hxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr)
2790 {
2791 	p_hxge_t	hxgep = arg;
2792 	mac_addr_slot_t	slot;
2793 	hxge_mmac_t	*mmac_info;
2794 	int		err;
2795 	hxge_status_t	status;
2796 
2797 	mutex_enter(hxgep->genlock);
2798 
2799 	/*
2800 	 * Make sure that hxge is initialized if _start() has
2801 	 * not been called yet.
2802 	 */
2803 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2804 		status = hxge_init(hxgep);
2805 		if (status != HXGE_OK) {
2806 			mutex_exit(hxgep->genlock);
2807 			return (ENXIO);
2808 		}
2809 	}
2810 
2811 	mmac_info = &hxgep->hxge_mmac_info;
2812 	if (mmac_info->naddrfree == 0) {
2813 		mutex_exit(hxgep->genlock);
2814 		return (ENOSPC);
2815 	}
2816 
2817 	if (!mac_unicst_verify(hxgep->mach, maddr->mma_addr,
2818 	    maddr->mma_addrlen)) {
2819 		mutex_exit(hxgep->genlock);
2820 		return (EINVAL);
2821 	}
2822 
2823 	/*
2824 	 * Search for the first available slot. Because naddrfree
2825 	 * is not zero, we are guaranteed to find one.
2826 	 * Slot 0 is for unique (primary) MAC.  The first alternate
2827 	 * MAC slot is slot 1.
2828 	 */
2829 	for (slot = 1; slot < mmac_info->num_mmac; slot++) {
2830 		if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
2831 			break;
2832 	}
2833 
2834 	ASSERT(slot < mmac_info->num_mmac);
2835 	if ((err = hxge_altmac_set(hxgep, maddr->mma_addr, slot)) != 0) {
2836 		mutex_exit(hxgep->genlock);
2837 		return (err);
2838 	}
2839 	bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
2840 	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
2841 	mmac_info->naddrfree--;
2842 	hxge_mmac_kstat_update(hxgep, slot);
2843 
2844 	maddr->mma_slot = slot;
2845 
2846 	mutex_exit(hxgep->genlock);
2847 	return (0);
2848 }
2849 
2850 /*
2851  * Remove the specified MAC address and update the hardware
2852  * so that it no longer filters on that address.
2853  * Returns: 0 on success.
2854  */
2855 int
2856 hxge_m_mmac_remove(void *arg, mac_addr_slot_t slot)
2857 {
2858 	p_hxge_t	hxgep = arg;
2859 	hxge_mmac_t	*mmac_info;
2860 	int		err = 0;
2861 	hxge_status_t	status;
2862 
2863 	mutex_enter(hxgep->genlock);
2864 
2865 	/*
2866 	 * Make sure that hxge is initialized if _start() has
2867 	 * not been called yet.
2868 	 */
2869 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2870 		status = hxge_init(hxgep);
2871 		if (status != HXGE_OK) {
2872 			mutex_exit(hxgep->genlock);
2873 			return (ENXIO);
2874 		}
2875 	}
2876 
2877 	mmac_info = &hxgep->hxge_mmac_info;
2878 	if (slot <= 0 || slot >= mmac_info->num_mmac) {
2879 		mutex_exit(hxgep->genlock);
2880 		return (EINVAL);
2881 	}
2882 
2883 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
2884 		if (hpi_pfc_mac_addr_disable(hxgep->hpi_handle, slot) ==
2885 		    HPI_SUCCESS) {
2886 			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
2887 			mmac_info->naddrfree++;
2888 			/*
2889 			 * Clear mac_pool[slot].addr so that kstat shows 0
2890 			 * alternate MAC address if the slot is not used.
2891 			 */
2892 			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
2893 			hxge_mmac_kstat_update(hxgep, slot);
2894 		} else {
2895 			err = EIO;
2896 		}
2897 	} else {
2898 		err = EINVAL;
2899 	}
2900 
2901 	mutex_exit(hxgep->genlock);
2902 	return (err);
2903 }
2904 
2905 /*
2906  * Modify a MAC address added by hxge_m_mmac_add().
2907  * Returns: 0 on success.
2908  */
2909 int
2910 hxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr)
2911 {
2912 	p_hxge_t	hxgep = arg;
2913 	mac_addr_slot_t	slot;
2914 	hxge_mmac_t	*mmac_info;
2915 	int		err = 0;
2916 	hxge_status_t	status;
2917 
2918 	if (!mac_unicst_verify(hxgep->mach, maddr->mma_addr,
2919 	    maddr->mma_addrlen))
2920 		return (EINVAL);
2921 
2922 	slot = maddr->mma_slot;
2923 
2924 	mutex_enter(hxgep->genlock);
2925 
2926 	/*
2927 	 * Make sure that hxge is initialized if _start() has
2928 	 * not been called yet.
2929 	 */
2930 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2931 		status = hxge_init(hxgep);
2932 		if (status != HXGE_OK) {
2933 			mutex_exit(hxgep->genlock);
2934 			return (ENXIO);
2935 		}
2936 	}
2937 
2938 	mmac_info = &hxgep->hxge_mmac_info;
2939 	if (slot <= 0 || slot >= mmac_info->num_mmac) {
2940 		mutex_exit(hxgep->genlock);
2941 		return (EINVAL);
2942 	}
2943 
2944 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
2945 		if ((err = hxge_altmac_set(hxgep, maddr->mma_addr,
2946 		    slot)) == 0) {
2947 			bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr,
2948 			    ETHERADDRL);
2949 			hxge_mmac_kstat_update(hxgep, slot);
2950 		}
2951 	} else {
2952 		err = EINVAL;
2953 	}
2954 
2955 	mutex_exit(hxgep->genlock);
2956 	return (err);
2957 }
2958 
2959 /*
2960  * int
2961  * hxge_m_mmac_get() - Get the MAC address and other information
2962  *	related to the slot.  mma_flags should be set to 0 in the call.
2963  *	Note: although kstat shows the MAC address as zero when a slot is
2964  *	not used, Crossbow expects hxge_m_mmac_get to copy the factory MAC
2965  *	to the caller as long as the slot is not using a user MAC address.
2966  *	The following table shows the rules:
2967  *
2968  *     					USED    VENDOR    mma_addr
2969  *	------------------------------------------------------------
2970  *	(1) Slot uses a user MAC:	yes      no     user MAC
2971  *	(2) Slot uses a factory MAC:    yes      yes    factory MAC
2972  *	(3) Slot is not used but is
2973  *	     factory MAC capable:	no       yes    factory MAC
2974  *	(4) Slot is not used and is
2975  *	     not factory MAC capable:   no       no	0
2976  *	------------------------------------------------------------
2977  */
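/*
 * Note: hxge_m_getcapab() reports no factory MAC support for this device
 * (maddr_flag is 0 and maddr_reserve is NULL), so in practice only cases
 * (1) and (4) above apply here.
 */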
2978 int
2979 hxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr)
2980 {
2981 	hxge_t		*hxgep = arg;
2982 	mac_addr_slot_t	slot;
2983 	hxge_mmac_t	*mmac_info;
2984 	hxge_status_t	status;
2985 
2986 	slot = maddr->mma_slot;
2987 
2988 	mutex_enter(hxgep->genlock);
2989 
2990 	/*
2991 	 * Make sure that hxge is initialized if _start() has
2992 	 * not been called yet.
2993 	 */
2994 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2995 		status = hxge_init(hxgep);
2996 		if (status != HXGE_OK) {
2997 			mutex_exit(hxgep->genlock);
2998 			return (ENXIO);
2999 		}
3000 	}
3001 
3002 	mmac_info = &hxgep->hxge_mmac_info;
3003 	if (slot <= 0 || slot >= mmac_info->num_mmac) {
3004 		mutex_exit(hxgep->genlock);
3005 		return (EINVAL);
3006 	}
3007 
3008 	maddr->mma_flags = 0;
3009 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
3010 		maddr->mma_flags |= MMAC_SLOT_USED;
3011 		bcopy(mmac_info->mac_pool[slot].addr,
3012 		    maddr->mma_addr, ETHERADDRL);
3013 		maddr->mma_addrlen = ETHERADDRL;
3014 	}
3015 
3016 	mutex_exit(hxgep->genlock);
3017 	return (0);
3018 }
3019 
3020 /*ARGSUSED*/
3021 boolean_t
3022 hxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
3023 {
3024 	p_hxge_t		hxgep = (p_hxge_t)arg;
3025 	uint32_t		*txflags = cap_data;
3026 	multiaddress_capab_t	*mmacp = cap_data;
3027 
3028 	switch (cap) {
3029 	case MAC_CAPAB_HCKSUM:
3030 		*txflags = HCKSUM_INET_PARTIAL;
3031 		break;
3032 
3033 	case MAC_CAPAB_POLL:
3034 		/*
3035 		 * There's nothing for us to fill in; simply returning B_TRUE
3036 		 * to state that we support polling is sufficient.
3037 		 */
3038 		break;
3039 
3040 	case MAC_CAPAB_MULTIADDRESS:
3041 		/*
3042 		 * The number of MAC addresses made available by
3043 		 * this capability is one less than the total as
3044 		 * the primary address in slot 0 is counted in
3045 		 * the total.
3046 		 */
3047 		mmacp->maddr_naddr = PFC_N_MAC_ADDRESSES - 1;
3048 		mmacp->maddr_naddrfree = hxgep->hxge_mmac_info.naddrfree;
3049 		mmacp->maddr_flag = 0;	/* No multiple factory macs */
3050 		mmacp->maddr_handle = hxgep;
3051 		mmacp->maddr_add = hxge_m_mmac_add;
3052 		mmacp->maddr_remove = hxge_m_mmac_remove;
3053 		mmacp->maddr_modify = hxge_m_mmac_modify;
3054 		mmacp->maddr_get = hxge_m_mmac_get;
3055 		mmacp->maddr_reserve = NULL;	/* No multiple factory macs */
3056 		break;
3057 	default:
3058 		return (B_FALSE);
3059 	}
3060 	return (B_TRUE);
3061 }
3062 
3063 static boolean_t
3064 hxge_param_locked(mac_prop_id_t pr_num)
3065 {
3066 	/*
3067 	 * All adv_* parameters are locked (read-only) while
3068 	 * the device is in any sort of loopback mode ...
3069 	 */
3070 	switch (pr_num) {
3071 		case MAC_PROP_ADV_1000FDX_CAP:
3072 		case MAC_PROP_EN_1000FDX_CAP:
3073 		case MAC_PROP_ADV_1000HDX_CAP:
3074 		case MAC_PROP_EN_1000HDX_CAP:
3075 		case MAC_PROP_ADV_100FDX_CAP:
3076 		case MAC_PROP_EN_100FDX_CAP:
3077 		case MAC_PROP_ADV_100HDX_CAP:
3078 		case MAC_PROP_EN_100HDX_CAP:
3079 		case MAC_PROP_ADV_10FDX_CAP:
3080 		case MAC_PROP_EN_10FDX_CAP:
3081 		case MAC_PROP_ADV_10HDX_CAP:
3082 		case MAC_PROP_EN_10HDX_CAP:
3083 		case MAC_PROP_AUTONEG:
3084 		case MAC_PROP_FLOWCTRL:
3085 			return (B_TRUE);
3086 	}
3087 	return (B_FALSE);
3088 }
3089 
3090 /*
3091  * callback functions for set/get of properties
3092  */
3093 static int
3094 hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3095     uint_t pr_valsize, const void *pr_val)
3096 {
3097 	hxge_t		*hxgep = barg;
3098 	p_hxge_stats_t	statsp;
3099 	int		err = 0;
3100 	uint32_t	new_mtu, old_framesize, new_framesize;
3101 
3102 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "==> hxge_m_setprop"));
3103 
3104 	statsp = hxgep->statsp;
3105 	mutex_enter(hxgep->genlock);
3106 	if (statsp->port_stats.lb_mode != hxge_lb_normal &&
3107 	    hxge_param_locked(pr_num)) {
3108 		/*
3109 		 * All adv_* parameters are locked (read-only)
3110 		 * while the device is in any sort of loopback mode.
3111 		 */
3112 		HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3113 		    "==> hxge_m_setprop: loopback mode: read only"));
3114 		mutex_exit(hxgep->genlock);
3115 		return (EBUSY);
3116 	}
3117 
3118 	switch (pr_num) {
3119 		/*
3120 		 * These properties either do not exist or are read-only.
3121 		 */
3122 		case MAC_PROP_EN_1000FDX_CAP:
3123 		case MAC_PROP_EN_100FDX_CAP:
3124 		case MAC_PROP_EN_10FDX_CAP:
3125 		case MAC_PROP_EN_1000HDX_CAP:
3126 		case MAC_PROP_EN_100HDX_CAP:
3127 		case MAC_PROP_EN_10HDX_CAP:
3128 		case MAC_PROP_ADV_1000FDX_CAP:
3129 		case MAC_PROP_ADV_1000HDX_CAP:
3130 		case MAC_PROP_ADV_100FDX_CAP:
3131 		case MAC_PROP_ADV_100HDX_CAP:
3132 		case MAC_PROP_ADV_10FDX_CAP:
3133 		case MAC_PROP_ADV_10HDX_CAP:
3134 		case MAC_PROP_STATUS:
3135 		case MAC_PROP_SPEED:
3136 		case MAC_PROP_DUPLEX:
3137 		case MAC_PROP_AUTONEG:
3138 		/*
3139 		 * Flow control is handled in the shared domain and
3140 		 * it is read-only here.
3141 		 */
3142 		case MAC_PROP_FLOWCTRL:
3143 			err = EINVAL;
3144 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3145 			    "==> hxge_m_setprop:  read only property %d",
3146 			    pr_num));
3147 			break;
3148 
3149 		case MAC_PROP_MTU:
3150 			if (hxgep->hxge_mac_state == HXGE_MAC_STARTED) {
3151 				err = EBUSY;
3152 				break;
3153 			}
3154 
3155 			bcopy(pr_val, &new_mtu, sizeof (new_mtu));
3156 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3157 			    "==> hxge_m_setprop: set MTU: %d", new_mtu));
3158 
3159 			new_framesize = new_mtu + MTU_TO_FRAME_SIZE;
3160 			if (new_framesize == hxgep->vmac.maxframesize) {
3161 				err = 0;
3162 				break;
3163 			}
3164 
3165 			if (new_framesize < MIN_FRAME_SIZE ||
3166 			    new_framesize > MAX_FRAME_SIZE) {
3167 				err = EINVAL;
3168 				break;
3169 			}
3170 
3171 			old_framesize = hxgep->vmac.maxframesize;
3172 			hxgep->vmac.maxframesize = (uint16_t)new_framesize;
3173 
3174 			if (hxge_vmac_set_framesize(hxgep)) {
3175 				hxgep->vmac.maxframesize =
3176 				    (uint16_t)old_framesize;
3177 				err = EINVAL;
3178 				break;
3179 			}
3180 
3181 			err = mac_maxsdu_update(hxgep->mach, new_mtu);
3182 			if (err) {
3183 				hxgep->vmac.maxframesize =
3184 				    (uint16_t)old_framesize;
3185 				(void) hxge_vmac_set_framesize(hxgep);
3186 			}
3187 
3188 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3189 			    "==> hxge_m_setprop: set MTU: %d maxframe %d",
3190 			    new_mtu, hxgep->vmac.maxframesize));
3191 			break;
3192 
3193 		case MAC_PROP_PRIVATE:
3194 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3195 			    "==> hxge_m_setprop: private property"));
3196 			err = hxge_set_priv_prop(hxgep, pr_name, pr_valsize,
3197 			    pr_val);
3198 			break;
3199 
3200 		default:
3201 			err = ENOTSUP;
3202 			break;
3203 	}
3204 
3205 	mutex_exit(hxgep->genlock);
3206 
3207 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3208 	    "<== hxge_m_setprop (return %d)", err));
3209 
3210 	return (err);
3211 }
3212 
3213 /* ARGSUSED */
3214 static int
3215 hxge_get_def_val(hxge_t *hxgep, mac_prop_id_t pr_num, uint_t pr_valsize,
3216     void *pr_val)
3217 {
3218 	int		err = 0;
3219 	link_flowctrl_t	fl;
3220 
3221 	switch (pr_num) {
3222 	case MAC_PROP_DUPLEX:
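		/* Default is full duplex (link_duplex_t value 2). */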
3223 		*(uint8_t *)pr_val = 2;
3224 		break;
3225 	case MAC_PROP_AUTONEG:
3226 		*(uint8_t *)pr_val = 0;
3227 		break;
3228 	case MAC_PROP_FLOWCTRL:
3229 		if (pr_valsize < sizeof (link_flowctrl_t))
3230 			return (EINVAL);
3231 		fl = LINK_FLOWCTRL_TX;
3232 		bcopy(&fl, pr_val, sizeof (fl));
3233 		break;
3234 	default:
3235 		err = ENOTSUP;
3236 		break;
3237 	}
3238 	return (err);
3239 }
3240 
3241 static int
3242 hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3243     uint_t pr_flags, uint_t pr_valsize, void *pr_val)
3244 {
3245 	hxge_t 		*hxgep = barg;
3246 	p_hxge_stats_t	statsp = hxgep->statsp;
3247 	int		err = 0;
3248 	link_flowctrl_t fl;
3249 	uint64_t	tmp = 0;
3250 	link_state_t	ls;
3251 
3252 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3253 	    "==> hxge_m_getprop: pr_num %d", pr_num));
3254 
3255 	if (pr_valsize == 0)
3256 		return (EINVAL);
3257 
3258 	if ((pr_flags & MAC_PROP_DEFAULT) && (pr_num != MAC_PROP_PRIVATE)) {
3259 		err = hxge_get_def_val(hxgep, pr_num, pr_valsize, pr_val);
3260 		return (err);
3261 	}
3262 
3263 	bzero(pr_val, pr_valsize);
3264 	switch (pr_num) {
3265 		case MAC_PROP_DUPLEX:
3266 			*(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
3267 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3268 			    "==> hxge_m_getprop: duplex mode %d",
3269 			    *(uint8_t *)pr_val));
3270 			break;
3271 
3272 		case MAC_PROP_SPEED:
3273 			if (pr_valsize < sizeof (uint64_t))
3274 				return (EINVAL);
3275 			tmp = statsp->mac_stats.link_speed * 1000000ull;
3276 			bcopy(&tmp, pr_val, sizeof (tmp));
3277 			break;
3278 
3279 		case MAC_PROP_STATUS:
3280 			if (pr_valsize < sizeof (link_state_t))
3281 				return (EINVAL);
3282 			if (!statsp->mac_stats.link_up)
3283 				ls = LINK_STATE_DOWN;
3284 			else
3285 				ls = LINK_STATE_UP;
3286 			bcopy(&ls, pr_val, sizeof (ls));
3287 			break;
3288 
3289 		case MAC_PROP_FLOWCTRL:
3290 			/*
3291 			 * Flow control is supported by the shared domain and
3292 			 * it is currently transmit-only.
3293 			 */
3294 			if (pr_valsize < sizeof (link_flowctrl_t))
3295 				return (EINVAL);
3296 			fl = LINK_FLOWCTRL_TX;
3297 			bcopy(&fl, pr_val, sizeof (fl));
3298 			break;
3299 		case MAC_PROP_AUTONEG:
3300 			/* 10G link only and it is not negotiable */
3301 			*(uint8_t *)pr_val = 0;
3302 			break;
3303 		case MAC_PROP_ADV_1000FDX_CAP:
3304 		case MAC_PROP_ADV_100FDX_CAP:
3305 		case MAC_PROP_ADV_10FDX_CAP:
3306 		case MAC_PROP_ADV_1000HDX_CAP:
3307 		case MAC_PROP_ADV_100HDX_CAP:
3308 		case MAC_PROP_ADV_10HDX_CAP:
3309 		case MAC_PROP_EN_1000FDX_CAP:
3310 		case MAC_PROP_EN_100FDX_CAP:
3311 		case MAC_PROP_EN_10FDX_CAP:
3312 		case MAC_PROP_EN_1000HDX_CAP:
3313 		case MAC_PROP_EN_100HDX_CAP:
3314 		case MAC_PROP_EN_10HDX_CAP:
3315 			err = ENOTSUP;
3316 			break;
3317 
3318 		case MAC_PROP_PRIVATE:
3319 			err = hxge_get_priv_prop(hxgep, pr_name, pr_flags,
3320 			    pr_valsize, pr_val);
3321 			break;
3322 		default:
3323 			err = EINVAL;
3324 			break;
3325 	}
3326 
3327 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "<== hxge_m_getprop"));
3328 
3329 	return (err);
3330 }
3331 
3332 /* ARGSUSED */
3333 static int
3334 hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_valsize,
3335     const void *pr_val)
3336 {
3337 	p_hxge_param_t	param_arr = hxgep->param_arr;
3338 	int		err = 0;
3339 
3340 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3341 	    "==> hxge_set_priv_prop: name %s (value %s)", pr_name, pr_val));
3342 
3343 	if (pr_val == NULL) {
3344 		return (EINVAL);
3345 	}
3346 
3347 	/* Blanking */
3348 	if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3349 		err = hxge_param_rx_intr_time(hxgep, NULL, NULL,
3350 		    (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_time]);
3351 	} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3352 		err = hxge_param_rx_intr_pkts(hxgep, NULL, NULL,
3353 		    (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_pkts]);
3354 
3355 	/* Classification */
3356 	} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3357 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3358 		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3359 	} else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3360 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3361 		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3362 	} else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3363 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3364 		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3365 	} else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3366 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3367 		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3368 	} else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3369 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3370 		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3371 	} else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3372 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3373 		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3374 	} else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3375 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3376 		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3377 	} else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3378 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3379 		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3380 	} else {
3381 		err = EINVAL;
3382 	}
3383 
3384 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3385 	    "<== hxge_set_priv_prop: err %d", err));
3386 
3387 	return (err);
3388 }
3389 
3390 static int
3391 hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_flags,
3392     uint_t pr_valsize, void *pr_val)
3393 {
3394 	p_hxge_param_t	param_arr = hxgep->param_arr;
3395 	char		valstr[MAXNAMELEN];
3396 	int		err = 0;
3397 	uint_t		strsize;
3398 	int		value = 0;
3399 
3400 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3401 	    "==> hxge_get_priv_prop: property %s", pr_name));
3402 
3403 	if (pr_flags & MAC_PROP_DEFAULT) {
3404 		/* Receive Interrupt Blanking Parameters */
3405 		if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3406 			value = RXDMA_RCR_TO_DEFAULT;
3407 		} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3408 			value = RXDMA_RCR_PTHRES_DEFAULT;
3409 
3410 		/* Classification and Load Distribution Configuration */
3411 		} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0 ||
3412 		    strcmp(pr_name, "_class_opt_ipv4_udp") == 0 ||
3413 		    strcmp(pr_name, "_class_opt_ipv4_ah") == 0 ||
3414 		    strcmp(pr_name, "_class_opt_ipv4_sctp") == 0 ||
3415 		    strcmp(pr_name, "_class_opt_ipv6_tcp") == 0 ||
3416 		    strcmp(pr_name, "_class_opt_ipv6_udp") == 0 ||
3417 		    strcmp(pr_name, "_class_opt_ipv6_ah") == 0 ||
3418 		    strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3419 			value = HXGE_CLASS_TCAM_LOOKUP;
3420 		} else {
3421 			err = EINVAL;
3422 		}
3423 	} else {
3424 		/* Receive Interrupt Blanking Parameters */
3425 		if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3426 			value = hxgep->intr_timeout;
3427 		} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3428 			value = hxgep->intr_threshold;
3429 
3430 		/* Classification and Load Distribution Configuration */
3431 		} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3432 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3433 			    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3434 
3435 			value = (int)param_arr[param_class_opt_ipv4_tcp].value;
3436 		} else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3437 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3438 			    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3439 
3440 			value = (int)param_arr[param_class_opt_ipv4_udp].value;
3441 		} else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3442 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3443 			    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3444 
3445 			value = (int)param_arr[param_class_opt_ipv4_ah].value;
3446 		} else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3447 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3448 			    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3449 
3450 			value = (int)param_arr[param_class_opt_ipv4_sctp].value;
3451 		} else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3452 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3453 			    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3454 
3455 			value = (int)param_arr[param_class_opt_ipv6_tcp].value;
3456 		} else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3457 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3458 			    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3459 
3460 			value = (int)param_arr[param_class_opt_ipv6_udp].value;
3461 		} else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3462 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3463 			    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3464 
3465 			value = (int)param_arr[param_class_opt_ipv6_ah].value;
3466 		} else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3467 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3468 			    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3469 
3470 			value = (int)param_arr[param_class_opt_ipv6_sctp].value;
3471 		} else {
3472 			err = EINVAL;
3473 		}
3474 	}
3475 
3476 	if (err == 0) {
3477 		(void) snprintf(valstr, sizeof (valstr), "0x%x", value);
3478 
3479 		strsize = (uint_t)strlen(valstr);
3480 		if (pr_valsize < strsize) {
3481 			err = ENOBUFS;
3482 		} else {
3483 			(void) strlcpy(pr_val, valstr, pr_valsize);
3484 		}
3485 	}
3486 
3487 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3488 	    "<== hxge_get_priv_prop: return %d", err));
3489 
3490 	return (err);
3491 }

3492 /*
3493  * Module loading and removing entry points.
3494  */
3495 DDI_DEFINE_STREAM_OPS(hxge_dev_ops, nulldev, nulldev, hxge_attach, hxge_detach,
3496     nodev, NULL, D_MP, NULL, ddi_quiesce_not_supported);
3497 
3498 extern struct mod_ops mod_driverops;
3499 
3500 #define	HXGE_DESC_VER	"HXGE 10Gb Ethernet Driver"
3501 
3502 /*
3503  * Module linkage information for the kernel.
3504  */
3505 static struct modldrv hxge_modldrv = {
3506 	&mod_driverops,
3507 	HXGE_DESC_VER,
3508 	&hxge_dev_ops
3509 };
3510 
3511 static struct modlinkage modlinkage = {
3512 	MODREV_1, (void *) &hxge_modldrv, NULL
3513 };
3514 
3515 int
3516 _init(void)
3517 {
3518 	int status;
3519 
3520 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
3521 	mac_init_ops(&hxge_dev_ops, "hxge");
3522 	status = ddi_soft_state_init(&hxge_list, sizeof (hxge_t), 0);
3523 	if (status != 0) {
3524 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
3525 		    "failed to init device soft state"));
3526 		mac_fini_ops(&hxge_dev_ops);
3527 		goto _init_exit;
3528 	}
3529 
3530 	status = mod_install(&modlinkage);
3531 	if (status != 0) {
3532 		ddi_soft_state_fini(&hxge_list);
3533 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "Mod install failed"));
3534 		goto _init_exit;
3535 	}
3536 
3537 	MUTEX_INIT(&hxge_common_lock, NULL, MUTEX_DRIVER, NULL);
3538 
3539 _init_exit:
3540 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
3541 
3542 	return (status);
3543 }
3544 
3545 int
3546 _fini(void)
3547 {
3548 	int status;
3549 
3550 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
3551 
3552 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
3553 
3554 	if (hxge_mblks_pending)
3555 		return (EBUSY);
3556 
3557 	status = mod_remove(&modlinkage);
3558 	if (status != 0) {
3559 		HXGE_DEBUG_MSG((NULL, MOD_CTL,
3560 		    "Module removal failed 0x%08x", status));
3561 		goto _fini_exit;
3562 	}
3563 
3564 	mac_fini_ops(&hxge_dev_ops);
3565 
3566 	ddi_soft_state_fini(&hxge_list);
3567 
3568 	MUTEX_DESTROY(&hxge_common_lock);
3569 
3570 _fini_exit:
3571 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));
3572 
3573 	return (status);
3574 }
3575 
3576 int
3577 _info(struct modinfo *modinfop)
3578 {
3579 	int status;
3580 
3581 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
3582 	status = mod_info(&modlinkage, modinfop);
3583 	HXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
3584 
3585 	return (status);
3586 }
3587 
3588 /*ARGSUSED*/
3589 hxge_status_t
3590 hxge_add_intrs(p_hxge_t hxgep)
3591 {
3592 	int		intr_types;
3593 	int		type = 0;
3594 	int		ddi_status = DDI_SUCCESS;
3595 	hxge_status_t	status = HXGE_OK;
3596 
3597 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs"));
3598 
3599 	hxgep->hxge_intr_type.intr_registered = B_FALSE;
3600 	hxgep->hxge_intr_type.intr_enabled = B_FALSE;
3601 	hxgep->hxge_intr_type.msi_intx_cnt = 0;
3602 	hxgep->hxge_intr_type.intr_added = 0;
3603 	hxgep->hxge_intr_type.niu_msi_enable = B_FALSE;
3604 	hxgep->hxge_intr_type.intr_type = 0;
3605 
3606 	if (hxge_msi_enable) {
3607 		hxgep->hxge_intr_type.niu_msi_enable = B_TRUE;
3608 	}
3609 
3610 	/* Get the supported interrupt types */
3611 	if ((ddi_status = ddi_intr_get_supported_types(hxgep->dip, &intr_types))
3612 	    != DDI_SUCCESS) {
3613 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_intrs: "
3614 		    "ddi_intr_get_supported_types failed: status 0x%08x",
3615 		    ddi_status));
3616 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3617 	}
3618 
3619 	hxgep->hxge_intr_type.intr_types = intr_types;
3620 
3621 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3622 	    "ddi_intr_get_supported_types: 0x%08x", intr_types));
3623 
3624 	/*
3625 	 * Pick the interrupt type to use, keyed off hxge_msi_enable:
3626 	 *	1 - prefer MSI
3627 	 *	2 - prefer MSI-X
3628 	 *	any other value - FIXED (INTx emulation)
3629 	 */
3630 	switch (hxge_msi_enable) {
3631 	default:
3632 		type = DDI_INTR_TYPE_FIXED;
3633 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3634 		    "use fixed (intx emulation) type %08x", type));
3635 		break;
3636 
3637 	case 2:
3638 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3639 		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
3640 		if (intr_types & DDI_INTR_TYPE_MSIX) {
3641 			type = DDI_INTR_TYPE_MSIX;
3642 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3643 			    "==> hxge_add_intrs: "
3644 			    "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3645 		} else if (intr_types & DDI_INTR_TYPE_MSI) {
3646 			type = DDI_INTR_TYPE_MSI;
3647 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3648 			    "==> hxge_add_intrs: "
3649 			    "ddi_intr_get_supported_types: MSI 0x%08x", type));
3650 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
3651 			type = DDI_INTR_TYPE_FIXED;
3652 			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3653 			    "ddi_intr_get_supported_types: FIXED 0x%08x", type));
3654 		}
3655 		break;
3656 
3657 	case 1:
3658 		if (intr_types & DDI_INTR_TYPE_MSI) {
3659 			type = DDI_INTR_TYPE_MSI;
3660 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3661 			    "==> hxge_add_intrs: "
3662 			    "ddi_intr_get_supported_types: MSI 0x%08x", type));
3663 		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
3664 			type = DDI_INTR_TYPE_MSIX;
3665 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3666 			    "==> hxge_add_intrs: "
3667 			    "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3668 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
3669 			type = DDI_INTR_TYPE_FIXED;
3670 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3671 			    "==> hxge_add_intrs: "
3672 			    "ddi_intr_get_supported_types: FIXED 0x%08x", type));
3673 		}
3674 	}
3675 
3676 	hxgep->hxge_intr_type.intr_type = type;
3677 	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
3678 	    type == DDI_INTR_TYPE_FIXED) &&
3679 	    hxgep->hxge_intr_type.niu_msi_enable) {
3680 		if ((status = hxge_add_intrs_adv(hxgep)) != DDI_SUCCESS) {
3681 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3682 			    " hxge_add_intrs: "
3683 			    " hxge_add_intrs_adv failed: status 0x%08x",
3684 			    status));
3685 			return (status);
3686 		} else {
3687 			HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_intrs: "
3688 			    "interrupts registered : type %d", type));
3689 			hxgep->hxge_intr_type.intr_registered = B_TRUE;
3690 
3691 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
3692 			    "\nAdded advanced hxge add_intr_adv "
3693 			    "intr type 0x%x\n", type));
3694 
3695 			return (status);
3696 		}
3697 	}
3698 
3699 	if (!hxgep->hxge_intr_type.intr_registered) {
3700 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3701 		    "==> hxge_add_intrs: failed to register interrupts"));
3702 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3703 	}
3704 
3705 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs"));
3706 
3707 	return (status);
3708 }
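
/*
 * Tuning note (editorial): hxge_msi_enable is a global tunable, so the
 * selection above can be forced from /etc/system before the driver
 * loads; any value other than 1 (MSI) or 2 (MSI-X) falls back to FIXED
 * (INTx emulation).  For example, to request MSI-X:
 *
 *	set hxge:hxge_msi_enable = 2
 */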
3709 
3710 /*ARGSUSED*/
3711 static hxge_status_t
3712 hxge_add_soft_intrs(p_hxge_t hxgep)
3713 {
3714 	int		ddi_status = DDI_SUCCESS;
3715 	hxge_status_t	status = HXGE_OK;
3716 
3717 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_soft_intrs"));
3718 
3719 	hxgep->resched_id = NULL;
3720 	hxgep->resched_running = B_FALSE;
3721 	ddi_status = ddi_add_softintr(hxgep->dip, DDI_SOFTINT_LOW,
3722 	    &hxgep->resched_id, NULL, NULL, hxge_reschedule, (caddr_t)hxgep);
3723 	if (ddi_status != DDI_SUCCESS) {
3724 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_soft_intrs: "
3725 		    "ddi_add_softintr failed: status 0x%08x", ddi_status));
3726 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3727 	}
3728 
3729 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_add_soft_intrs"));
3730 
3731 	return (status);
3732 }
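
/*
 * Editorial sketch, not the driver's transmit code: a soft interrupt
 * registered this way is normally fired later with
 * ddi_trigger_softintr(9F), which schedules hxge_reschedule() at
 * DDI_SOFTINT_LOW priority.  Assuming a resched_needed flag paired
 * with the resched_running flag initialized above:
 *
 *	if (hxgep->resched_needed && !hxgep->resched_running) {
 *		hxgep->resched_running = B_TRUE;
 *		ddi_trigger_softintr(hxgep->resched_id);
 *	}
 */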
3733 
3734 /*ARGSUSED*/
3735 static hxge_status_t
3736 hxge_add_intrs_adv(p_hxge_t hxgep)
3737 {
3738 	int		intr_type;
3739 	p_hxge_intr_t	intrp;
3740 	hxge_status_t	status;
3741 
3742 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv"));
3743 
3744 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3745 	intr_type = intrp->intr_type;
3746 
3747 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv: type 0x%x",
3748 	    intr_type));
3749 
3750 	switch (intr_type) {
3751 	case DDI_INTR_TYPE_MSI:		/* 0x2 */
3752 	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
3753 		status = hxge_add_intrs_adv_type(hxgep, intr_type);
3754 		break;
3755 
3756 	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
3757 		status = hxge_add_intrs_adv_type_fix(hxgep, intr_type);
3758 		break;
3759 
3760 	default:
3761 		status = HXGE_ERROR;
3762 		break;
3763 	}
3764 
3765 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv"));
3766 
3767 	return (status);
3768 }
3769 
3770 /*ARGSUSED*/
3771 static hxge_status_t
3772 hxge_add_intrs_adv_type(p_hxge_t hxgep, uint32_t int_type)
3773 {
3774 	dev_info_t	*dip = hxgep->dip;
3775 	p_hxge_ldg_t	ldgp;
3776 	p_hxge_intr_t	intrp;
3777 	uint_t		*inthandler;
3778 	void		*arg1, *arg2;
3779 	int		behavior;
3780 	int		nintrs, navail;
3781 	int		nactual, nrequired;
3782 	int		inum = 0;
3783 	int		loop = 0;
3784 	int		x, y;
3785 	int		ddi_status = DDI_SUCCESS;
3786 	hxge_status_t	status = HXGE_OK;
3787 
3788 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type"));
3789 
3790 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3791 
3792 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3793 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3794 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3795 		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
3796 		    "nintrs: %d", ddi_status, nintrs));
3797 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3798 	}
3799 
3800 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
3801 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
3802 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3803 		    "ddi_intr_get_navail() failed, status: 0x%x, "
3804 		    "navail: %d", ddi_status, navail));
3805 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3806 	}
3807 
3808 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3809 	    "ddi_intr_get_navail() returned: intr type %d nintrs %d, navail %d",
3810 	    int_type, nintrs, navail));
3811 
3812 	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
3813 		/* MSI vector counts must be a power of 2; round navail down. */
3814 		if ((navail & 16) == 16) {
3815 			navail = 16;
3816 		} else if ((navail & 8) == 8) {
3817 			navail = 8;
3818 		} else if ((navail & 4) == 4) {
3819 			navail = 4;
3820 		} else if ((navail & 2) == 2) {
3821 			navail = 2;
3822 		} else {
3823 			navail = 1;
3824 		}
3825 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3826 		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
3827 		    "navail %d", nintrs, navail));
3828 	}
3829 
3830 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3831 	    "requesting: intr type %d nintrs %d, navail %d",
3832 	    int_type, nintrs, navail));
3833 
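	/*
	 * ddi_intr_alloc(9F) semantics: DDI_INTR_ALLOC_STRICT is
	 * all-or-nothing, while DDI_INTR_ALLOC_NORMAL permits a partial
	 * allocation, so nactual may come back smaller than navail.
	 */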
3834 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
3835 	    DDI_INTR_ALLOC_NORMAL);
3836 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
3837 	intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP);
3838 
3839 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
3840 	    navail, &nactual, behavior);
3841 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
3842 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3843 		    " ddi_intr_alloc() failed: %d", ddi_status));
3844 		kmem_free(intrp->htable, intrp->intr_size);
3845 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3846 	}
3847 
3848 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3849 	    "ddi_intr_alloc() returned: navail %d nactual %d",
3850 	    navail, nactual));
3851 
3852 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
3853 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
3854 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3855 		    " ddi_intr_get_pri() failed: %d", ddi_status));
3856 		/* Free already allocated interrupts */
3857 		for (y = 0; y < nactual; y++) {
3858 			(void) ddi_intr_free(intrp->htable[y]);
3859 		}
3860 
3861 		kmem_free(intrp->htable, intrp->intr_size);
3862 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3863 	}
3864 
3865 	nrequired = 0;
3866 	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
3867 	if (status != HXGE_OK) {
3868 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3869 		    "hxge_add_intrs_adv_type: hxge_ldgv_init "
3870 		    "failed: 0x%x", status));
3871 		/* Free already allocated interrupts */
3872 		for (y = 0; y < nactual; y++) {
3873 			(void) ddi_intr_free(intrp->htable[y]);
3874 		}
3875 
3876 		kmem_free(intrp->htable, intrp->intr_size);
3877 		return (status);
3878 	}
3879 
3880 	ldgp = hxgep->ldgvp->ldgp;
3881 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3882 	    "After hxge_ldgv_init(): nreq %d nactual %d", nrequired, nactual));
3883 
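	/*
	 * Attach handlers to no more logical groups than vectors were
	 * actually allocated: take the smaller of nactual and nrequired.
	 */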
3884 	if (nactual < nrequired)
3885 		loop = nactual;
3886 	else
3887 		loop = nrequired;
3888 
3889 	for (x = 0; x < loop; x++, ldgp++) {
3890 		ldgp->vector = (uint8_t)x;
3891 		arg1 = ldgp->ldvp;
3892 		arg2 = hxgep;
3893 		if (ldgp->nldvs == 1) {
3894 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
3895 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3896 			    "hxge_add_intrs_adv_type: arg1 $%p arg2 $%p: "
3897 			    "1-1 int handler (entry %d)\n",
3898 			    arg1, arg2, x));
3899 		} else if (ldgp->nldvs > 1) {
3900 			inthandler = (uint_t *)ldgp->sys_intr_handler;
3901 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3902 			    "hxge_add_intrs_adv_type: arg1 $%p arg2 $%p: "
3903 			    "nldvs %d int handler (entry %d)\n",
3904 			    arg1, arg2, ldgp->nldvs, x));
3905 		}
3906 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3907 		    "==> hxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
3908 		    "htable 0x%llx", x, intrp->htable[x]));
3909 
3910 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
3911 		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
3912 		    DDI_SUCCESS) {
3913 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3914 			    "==> hxge_add_intrs_adv_type: failed #%d "
3915 			    "status 0x%x", x, ddi_status));
3916 			for (y = 0; y < intrp->intr_added; y++) {
3917 				(void) ddi_intr_remove_handler(
3918 				    intrp->htable[y]);
3919 			}
3920 
3921 			/* Free already allocated intr */
3922 			for (y = 0; y < nactual; y++) {
3923 				(void) ddi_intr_free(intrp->htable[y]);
3924 			}
3925 			kmem_free(intrp->htable, intrp->intr_size);
3926 
3927 			(void) hxge_ldgv_uninit(hxgep);
3928 
3929 			return (HXGE_ERROR | HXGE_DDI_FAILED);
3930 		}
3931 
3932 		intrp->intr_added++;
3933 	}
3934 	intrp->msi_intx_cnt = nactual;
3935 
3936 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3937 	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
3938 	    navail, nactual, intrp->msi_intx_cnt, intrp->intr_added));
3939 
3940 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
3941 	(void) hxge_intr_ldgv_init(hxgep);
3942 
3943 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type"));
3944 
3945 	return (status);
3946 }
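
/*
 * Editorial sketch: the MSI clamp above rounds navail down to a power
 * of two with a chain of bit tests.  An equivalent form for the ranges
 * that chain handles, assuming highbit(9F) and MIN() from
 * <sys/sysmacros.h> and navail > 0 (guaranteed by the earlier
 * ddi_intr_get_navail() check):
 *
 *	navail = MIN(16, 1 << (highbit((ulong_t)navail) - 1));
 *
 * e.g. navail 12 becomes 8, navail 5 becomes 4.
 */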
3947 
3948 /*ARGSUSED*/
3949 static hxge_status_t
3950 hxge_add_intrs_adv_type_fix(p_hxge_t hxgep, uint32_t int_type)
3951 {
3952 	dev_info_t	*dip = hxgep->dip;
3953 	p_hxge_ldg_t	ldgp;
3954 	p_hxge_intr_t	intrp;
3955 	uint_t		*inthandler;
3956 	void		*arg1, *arg2;
3957 	int		behavior;
3958 	int		nintrs, navail;
3959 	int		nactual, nrequired;
3960 	int		inum = 0;
3961 	int		x, y;
3962 	int		ddi_status = DDI_SUCCESS;
3963 	hxge_status_t	status = HXGE_OK;
3964 
3965 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type_fix"));
3966 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3967 
3968 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3969 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3970 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3971 		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
3972 		    "nintrs: %d", ddi_status, nintrs));
3973 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3974 	}
3975 
3976 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
3977 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
3978 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3979 		    "ddi_intr_get_navail() failed, status: 0x%x, "
3980 		    "navail: %d", ddi_status, navail));
3981 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3982 	}
3983 
3984 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3985 	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
3986 	    nintrs, navail));
3987 
3988 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
3989 	    DDI_INTR_ALLOC_NORMAL);
3990 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
3991 	intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP);
3992 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
3993 	    navail, &nactual, behavior);
3994 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
3995 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3996 		    " ddi_intr_alloc() failed: %d", ddi_status));
3997 		kmem_free(intrp->htable, intrp->intr_size);
3998 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3999 	}
4000 
4001 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
4002 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
4003 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4004 		    " ddi_intr_get_pri() failed: %d", ddi_status));
4005 		/* Free already allocated interrupts */
4006 		for (y = 0; y < nactual; y++) {
4007 			(void) ddi_intr_free(intrp->htable[y]);
4008 		}
4009 
4010 		kmem_free(intrp->htable, intrp->intr_size);
4011 		return (HXGE_ERROR | HXGE_DDI_FAILED);
4012 	}
4013 
4014 	nrequired = 0;
4015 	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
4016 	if (status != HXGE_OK) {
4017 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4018 		    "hxge_add_intrs_adv_type_fix: hxge_ldgv_init "
4019 		    "failed: 0x%x", status));
4020 		/* Free already allocated interrupts */
4021 		for (y = 0; y < nactual; y++) {
4022 			(void) ddi_intr_free(intrp->htable[y]);
4023 		}
4024 
4025 		kmem_free(intrp->htable, intrp->intr_size);
4026 		return (status);
4027 	}
4028 
4029 	ldgp = hxgep->ldgvp->ldgp;
4030 	for (x = 0; x < nrequired; x++, ldgp++) {
4031 		ldgp->vector = (uint8_t)x;
4032 		arg1 = ldgp->ldvp;
4033 		arg2 = hxgep;
4034 		if (ldgp->nldvs == 1) {
4035 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
4036 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
4037 			    "hxge_add_intrs_adv_type_fix: "
4038 			    "1-1 int handler(%d) ldg %d ldv %d "
4039 			    "arg1 $%p arg2 $%p\n",
4040 			    x, ldgp->ldg, ldgp->ldvp->ldv, arg1, arg2));
4041 		} else if (ldgp->nldvs > 1) {
4042 			inthandler = (uint_t *)ldgp->sys_intr_handler;
4043 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
4044 			    "hxge_add_intrs_adv_type_fix: "
4045 			    "shared int handler(%d) nldvs %d ldg %d ldv %d "
4046 			    "arg1 0x%016llx arg2 0x%016llx\n",
4047 			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
4048 			    arg1, arg2));
4049 		}
4050 
4051 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
4052 		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
4053 		    DDI_SUCCESS) {
4054 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4055 			    "==> hxge_add_intrs_adv_type_fix: failed #%d "
4056 			    "status 0x%x", x, ddi_status));
4057 			for (y = 0; y < intrp->intr_added; y++) {
4058 				(void) ddi_intr_remove_handler(
4059 				    intrp->htable[y]);
4060 			}
4061 			for (y = 0; y < nactual; y++) {
4062 				(void) ddi_intr_free(intrp->htable[y]);
4063 			}
4064 			/* Free already allocated intr */
4065 			kmem_free(intrp->htable, intrp->intr_size);
4066 
4067 			(void) hxge_ldgv_uninit(hxgep);
4068 
4069 			return (HXGE_ERROR | HXGE_DDI_FAILED);
4070 		}
4071 		intrp->intr_added++;
4072 	}
4073 
4074 	intrp->msi_intx_cnt = nactual;
4075 
4076 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
4077 
4078 	status = hxge_intr_ldgv_init(hxgep);
4079 
4080 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type_fix"));
4081 
4082 	return (status);
4083 }
4084 
4085 /*ARGSUSED*/
4086 static void
4087 hxge_remove_intrs(p_hxge_t hxgep)
4088 {
4089 	int		i, inum;
4090 	p_hxge_intr_t	intrp;
4091 
4092 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs"));
4093 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4094 	if (!intrp->intr_registered) {
4095 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
4096 		    "<== hxge_remove_intrs: interrupts not registered"));
4097 		return;
4098 	}
4099 
4100 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs: advanced"));
4101 
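	/*
	 * Teardown mirrors setup in reverse order: disable the vectors,
	 * remove the handlers, free each vector, then free the handle
	 * table itself.
	 */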
4102 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4103 		(void) ddi_intr_block_disable(intrp->htable,
4104 		    intrp->intr_added);
4105 	} else {
4106 		for (i = 0; i < intrp->intr_added; i++) {
4107 			(void) ddi_intr_disable(intrp->htable[i]);
4108 		}
4109 	}
4110 
4111 	for (inum = 0; inum < intrp->intr_added; inum++) {
4112 		if (intrp->htable[inum]) {
4113 			(void) ddi_intr_remove_handler(intrp->htable[inum]);
4114 		}
4115 	}
4116 
4117 	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
4118 		if (intrp->htable[inum]) {
4119 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
4120 			    "hxge_remove_intrs: ddi_intr_free inum %d "
4121 			    "msi_intx_cnt %d intr_added %d",
4122 			    inum, intrp->msi_intx_cnt, intrp->intr_added));
4123 
4124 			(void) ddi_intr_free(intrp->htable[inum]);
4125 		}
4126 	}
4127 
4128 	kmem_free(intrp->htable, intrp->intr_size);
4129 	intrp->intr_registered = B_FALSE;
4130 	intrp->intr_enabled = B_FALSE;
4131 	intrp->msi_intx_cnt = 0;
4132 	intrp->intr_added = 0;
4133 
4134 	(void) hxge_ldgv_uninit(hxgep);
4135 
4136 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_intrs"));
4137 }
4138 
4139 /*ARGSUSED*/
4140 static void
4141 hxge_remove_soft_intrs(p_hxge_t hxgep)
4142 {
4143 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_soft_intrs"));
4144 
4145 	if (hxgep->resched_id) {
4146 		ddi_remove_softintr(hxgep->resched_id);
4147 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
4148 		    "==> hxge_remove_soft_intrs: removed"));
4149 		hxgep->resched_id = NULL;
4150 	}
4151 
4152 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_soft_intrs"));
4153 }
4154 
4155 /*ARGSUSED*/
4156 void
4157 hxge_intrs_enable(p_hxge_t hxgep)
4158 {
4159 	p_hxge_intr_t	intrp;
4160 	int		i;
4161 	int		status;
4162 
4163 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable"));
4164 
4165 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4166 
4167 	if (!intrp->intr_registered) {
4168 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_intrs_enable: "
4169 		    "interrupts are not registered"));
4170 		return;
4171 	}
4172 
4173 	if (intrp->intr_enabled) {
4174 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
4175 		    "<== hxge_intrs_enable: already enabled"));
4176 		return;
4177 	}
4178 
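	/*
	 * DDI_INTR_FLAG_BLOCK, reported by ddi_intr_get_cap(9F), means
	 * the allocated vectors can only be enabled and disabled as a
	 * group.
	 */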
4179 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4180 		status = ddi_intr_block_enable(intrp->htable,
4181 		    intrp->intr_added);
4182 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
4183 		    "block enable - status 0x%x total inums #%d\n",
4184 		    status, intrp->intr_added));
4185 	} else {
4186 		for (i = 0; i < intrp->intr_added; i++) {
4187 			status = ddi_intr_enable(intrp->htable[i]);
4188 			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
4189 			    "ddi_intr_enable:enable - status 0x%x "
4190 			    "total inums %d enable inum #%d\n",
4191 			    status, intrp->intr_added, i));
4192 			if (status == DDI_SUCCESS) {
4193 				intrp->intr_enabled = B_TRUE;
4194 			}
4195 		}
4196 	}
4197 
4198 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_enable"));
4199 }
4200 
4201 /*ARGSUSED*/
4202 static void
4203 hxge_intrs_disable(p_hxge_t hxgep)
4204 {
4205 	p_hxge_intr_t	intrp;
4206 	int		i;
4207 
4208 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_disable"));
4209 
4210 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4211 
4212 	if (!intrp->intr_registered) {
4213 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable: "
4214 		    "interrupts are not registered"));
4215 		return;
4216 	}
4217 
4218 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4219 		(void) ddi_intr_block_disable(intrp->htable,
4220 		    intrp->intr_added);
4221 	} else {
4222 		for (i = 0; i < intrp->intr_added; i++) {
4223 			(void) ddi_intr_disable(intrp->htable[i]);
4224 		}
4225 	}
4226 
4227 	intrp->intr_enabled = B_FALSE;
4228 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable"));
4229 }
4230 
4231 static hxge_status_t
4232 hxge_mac_register(p_hxge_t hxgep)
4233 {
4234 	mac_register_t	*macp;
4235 	int		status;
4236 
4237 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_mac_register"));
4238 
4239 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
4240 		return (HXGE_ERROR);
4241 
4242 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
4243 	macp->m_driver = hxgep;
4244 	macp->m_dip = hxgep->dip;
4245 	macp->m_src_addr = hxgep->ouraddr.ether_addr_octet;
4246 
4247 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
4248 	    "hxge_mac_register: ether addr is %x:%x:%x:%x:%x:%x",
4249 	    macp->m_src_addr[0],
4250 	    macp->m_src_addr[1],
4251 	    macp->m_src_addr[2],
4252 	    macp->m_src_addr[3],
4253 	    macp->m_src_addr[4],
4254 	    macp->m_src_addr[5]));
4255 
4256 	macp->m_callbacks = &hxge_m_callbacks;
4257 	macp->m_min_sdu = 0;
4258 	macp->m_max_sdu = hxgep->vmac.maxframesize - MTU_TO_FRAME_SIZE;
4259 	macp->m_margin = VLAN_TAGSZ;
4260 	macp->m_priv_props = hxge_priv_props;
4261 	macp->m_priv_prop_count = HXGE_MAX_PRIV_PROPS;
4262 
4263 	status = mac_register(macp, &hxgep->mach);
4264 	mac_free(macp);
4265 
4266 	if (status != 0) {
4267 		cmn_err(CE_WARN,
4268 		    "hxge_mac_register failed (status %d instance %d)",
4269 		    status, hxgep->instance);
4270 		return (HXGE_ERROR);
4271 	}
4272 
4273 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_mac_register success "
4274 	    "(instance %d)", hxgep->instance));
4275 
4276 	return (HXGE_OK);
4277 }
4278 
4279 static int
4280 hxge_init_common_dev(p_hxge_t hxgep)
4281 {
4282 	p_hxge_hw_list_t	hw_p;
4283 	dev_info_t		*p_dip;
4284 
4285 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_init_common_dev"));
4286 
4287 	p_dip = hxgep->p_dip;
4288 	MUTEX_ENTER(&hxge_common_lock);
4289 
4290 	/*
4291 	 * Loop through existing per Hydra hardware list.
4292 	 */
4293 	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
4294 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4295 		    "==> hxge_init_common_dev: hw_p $%p parent dip $%p",
4296 		    hw_p, p_dip));
4297 		if (hw_p->parent_devp == p_dip) {
4298 			hxgep->hxge_hw_p = hw_p;
4299 			hw_p->ndevs++;
4300 			hw_p->hxge_p = hxgep;
4301 			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4302 			    "==> hxge_init_common_dev: "
4303 			    "hw_p $%p parent dip $%p ndevs %d (found)",
4304 			    hw_p, p_dip, hw_p->ndevs));
4305 			break;
4306 		}
4307 	}
4308 
4309 	if (hw_p == NULL) {
4310 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4311 		    "==> hxge_init_common_dev: parent dip $%p (new)", p_dip));
4312 		hw_p = kmem_zalloc(sizeof (hxge_hw_list_t), KM_SLEEP);
4313 		hw_p->parent_devp = p_dip;
4314 		hw_p->magic = HXGE_MAGIC;
4315 		hxgep->hxge_hw_p = hw_p;
4316 		hw_p->ndevs++;
4317 		hw_p->hxge_p = hxgep;
4318 		hw_p->next = hxge_hw_list;
4319 
4320 		MUTEX_INIT(&hw_p->hxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
4321 		MUTEX_INIT(&hw_p->hxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
4322 		MUTEX_INIT(&hw_p->hxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
4323 
4324 		hxge_hw_list = hw_p;
4325 	}
4326 	MUTEX_EXIT(&hxge_common_lock);
4327 	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4328 	    "==> hxge_init_common_dev (hxge_hw_list) $%p", hxge_hw_list));
4329 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_init_common_dev"));
4330 
4331 	return (HXGE_OK);
4332 }
4333 
4334 static void
4335 hxge_uninit_common_dev(p_hxge_t hxgep)
4336 {
4337 	p_hxge_hw_list_t	hw_p, h_hw_p;
4338 	dev_info_t		*p_dip;
4339 
4340 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_uninit_common_dev"));
4341 	if (hxgep->hxge_hw_p == NULL) {
4342 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4343 		    "<== hxge_uninit_common_dev (no common)"));
4344 		return;
4345 	}
4346 
4347 	MUTEX_ENTER(&hxge_common_lock);
4348 	h_hw_p = hxge_hw_list;
4349 	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
4350 		p_dip = hw_p->parent_devp;
4351 		if (hxgep->hxge_hw_p == hw_p && p_dip == hxgep->p_dip &&
4352 		    hxgep->hxge_hw_p->magic == HXGE_MAGIC &&
4353 		    hw_p->magic == HXGE_MAGIC) {
4354 			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4355 			    "==> hxge_uninit_common_dev: "
4356 			    "hw_p $%p parent dip $%p ndevs %d (found)",
4357 			    hw_p, p_dip, hw_p->ndevs));
4358 
4359 			hxgep->hxge_hw_p = NULL;
4360 			if (hw_p->ndevs) {
4361 				hw_p->ndevs--;
4362 			}
4363 			hw_p->hxge_p = NULL;
4364 			if (!hw_p->ndevs) {
4365 				MUTEX_DESTROY(&hw_p->hxge_vlan_lock);
4366 				MUTEX_DESTROY(&hw_p->hxge_tcam_lock);
4367 				MUTEX_DESTROY(&hw_p->hxge_cfg_lock);
4368 				HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4369 				    "==> hxge_uninit_common_dev: "
4370 				    "hw_p $%p parent dip $%p ndevs %d (last)",
4371 				    hw_p, p_dip, hw_p->ndevs));
4372 
4373 				if (hw_p == hxge_hw_list) {
4374 					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4375 					    "==> hxge_uninit_common_dev:"
4376 					    "remove head "
4377 					    "hw_p $%p parent dip $%p "
4378 					    "ndevs %d (head)",
4379 					    hw_p, p_dip, hw_p->ndevs));
4380 					hxge_hw_list = hw_p->next;
4381 				} else {
4382 					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4383 					    "==> hxge_uninit_common_dev:"
4384 					    "remove middle "
4385 					    "hw_p $%p parent dip $%p "
4386 					    "ndevs %d (middle)",
4387 					    hw_p, p_dip, hw_p->ndevs));
4388 					h_hw_p->next = hw_p->next;
4389 				}
4390 
4391 				KMEM_FREE(hw_p, sizeof (hxge_hw_list_t));
4392 			}
4393 			break;
4394 		} else {
4395 			h_hw_p = hw_p;
4396 		}
4397 	}
4398 
4399 	MUTEX_EXIT(&hxge_common_lock);
4400 	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4401 	    "==> hxge_uninit_common_dev (hxge_hw_list) $%p", hxge_hw_list));
4402 
4403 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_uninit_common_dev"));
4404 }
4405 
4406 static void
4407 hxge_link_poll(void *arg)
4408 {
4409 	p_hxge_t		hxgep = (p_hxge_t)arg;
4410 	hpi_handle_t		handle;
4411 	p_hxge_stats_t		statsp;
4412 	cip_link_stat_t		link_stat;
4413 	hxge_timeout		*to = &hxgep->timeout;
4414 
4415 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
4416 	statsp = (p_hxge_stats_t)hxgep->statsp;
4417 	HXGE_REG_RD32(handle, CIP_LINK_STAT, &link_stat.value);
4418 
4419 	if (to->link_status != link_stat.bits.xpcs0_link_up) {
4420 		to->link_status = link_stat.bits.xpcs0_link_up;
4421 
4422 		if (link_stat.bits.xpcs0_link_up) {
4423 			mac_link_update(hxgep->mach, LINK_STATE_UP);
4424 			statsp->mac_stats.link_speed = 10000;
4425 			statsp->mac_stats.link_duplex = 2;
4426 			statsp->mac_stats.link_up = 1;
4427 		} else {
4428 			mac_link_update(hxgep->mach, LINK_STATE_DOWN);
4429 			statsp->mac_stats.link_speed = 0;
4430 			statsp->mac_stats.link_duplex = 0;
4431 			statsp->mac_stats.link_up = 0;
4432 		}
4433 	}
4434 
4435 	/* Restart the link status timer to check the link status */
4436 	MUTEX_ENTER(&to->lock);
4437 	to->id = timeout(hxge_link_poll, arg, to->ticks);
4438 	MUTEX_EXIT(&to->lock);
4439 }
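
/*
 * Editorial sketch, not the driver's actual start/stop code (which
 * lives elsewhere in this file): the poll above re-arms itself, so it
 * only needs to be seeded once with timeout(9F) and cancelled with
 * untimeout(9F).  The interval shown is an assumption for illustration:
 *
 *	to->ticks = drv_usectohz(1000000);	(roughly one second)
 *	to->id = timeout(hxge_link_poll, (void *)hxgep, to->ticks);
 *	...
 *	MUTEX_ENTER(&to->lock);
 *	if (to->id) {
 *		(void) untimeout(to->id);
 *		to->id = 0;
 *	}
 *	MUTEX_EXIT(&to->lock);
 */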
4440