xref: /titanic_51/usr/src/uts/common/io/hxge/hxge_main.c (revision e1dd0a2f3a26050d1f183c1cafae42c4e3a0b57e)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * SunOS MT STREAMS Hydra 10Gb Ethernet Device Driver.
30  */
31 #include <hxge_impl.h>
32 #include <hxge_pfc.h>
33 
34 /*
35  * PSARC/2007/453 MSI-X interrupt limit override
36  * (This PSARC case is limited to MSI-X vectors
37  *  and SPARC platforms only).
38  */
39 #if defined(_BIG_ENDIAN)
40 uint32_t hxge_msi_enable = 2;
41 #else
42 uint32_t hxge_msi_enable = 1;
43 #endif
44 
45 /*
46  * Globals: tunable parameters (/etc/system or adb)
47  *
48  */
49 uint32_t hxge_rbr_size = HXGE_RBR_RBB_DEFAULT;
50 uint32_t hxge_rbr_spare_size = 0;
51 uint32_t hxge_rcr_size = HXGE_RCR_DEFAULT;
52 uint32_t hxge_tx_ring_size = HXGE_TX_RING_DEFAULT;
53 uint32_t hxge_bcopy_thresh = TX_BCOPY_MAX;
54 uint32_t hxge_dvma_thresh = TX_FASTDVMA_MIN;
55 uint32_t hxge_dma_stream_thresh = TX_STREAM_MIN;
56 uint32_t hxge_jumbo_mtu = TX_JUMBO_MTU;
57 boolean_t hxge_jumbo_enable = B_FALSE;
58 
59 static hxge_os_mutex_t hxgedebuglock;
60 static int hxge_debug_init = 0;
61 
62 /*
63  * Debugging flags:
64  *		hxge_no_tx_lb : transmit load balancing
65  *		hxge_tx_lb_policy: 0 - TCP/UDP port (default)
66  *				   1 - From the Stack
67  *				   2 - Destination IP Address
68  */
69 uint32_t hxge_no_tx_lb = 0;
70 uint32_t hxge_tx_lb_policy = HXGE_TX_LB_TCPUDP;
71 
72 /*
73  * Tunable to limit the amount of time spent in the
74  * ISR doing Rx processing.
75  */
76 uint32_t hxge_max_rx_pkts = 1024;
77 
78 /*
79  * Tunables to manage the receive buffer blocks.
80  *
81  * hxge_rx_threshold_hi: copy all buffers.
82  * hxge_rx_buf_size_type: receive buffer block size type.
83  * hxge_rx_threshold_lo: copy only up to tunable block size type.
84  */
85 hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_6;
86 hxge_rxbuf_type_t hxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
87 hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_3;
88 
89 rtrace_t hpi_rtracebuf;
90 
91 /*
92  * Function Prototypes
93  */
94 static int hxge_attach(dev_info_t *, ddi_attach_cmd_t);
95 static int hxge_detach(dev_info_t *, ddi_detach_cmd_t);
96 static void hxge_unattach(p_hxge_t);
97 
98 static hxge_status_t hxge_setup_system_dma_pages(p_hxge_t);
99 
100 static hxge_status_t hxge_setup_mutexes(p_hxge_t);
101 static void hxge_destroy_mutexes(p_hxge_t);
102 
103 static hxge_status_t hxge_map_regs(p_hxge_t hxgep);
104 static void hxge_unmap_regs(p_hxge_t hxgep);
105 
106 hxge_status_t hxge_add_intrs(p_hxge_t hxgep);
107 static hxge_status_t hxge_add_soft_intrs(p_hxge_t hxgep);
108 static void hxge_remove_intrs(p_hxge_t hxgep);
109 static void hxge_remove_soft_intrs(p_hxge_t hxgep);
110 static hxge_status_t hxge_add_intrs_adv(p_hxge_t hxgep);
111 static hxge_status_t hxge_add_intrs_adv_type(p_hxge_t, uint32_t);
112 static hxge_status_t hxge_add_intrs_adv_type_fix(p_hxge_t, uint32_t);
113 void hxge_intrs_enable(p_hxge_t hxgep);
114 static void hxge_intrs_disable(p_hxge_t hxgep);
115 static void hxge_suspend(p_hxge_t);
116 static hxge_status_t hxge_resume(p_hxge_t);
117 hxge_status_t hxge_setup_dev(p_hxge_t);
118 static void hxge_destroy_dev(p_hxge_t);
119 hxge_status_t hxge_alloc_mem_pool(p_hxge_t);
120 static void hxge_free_mem_pool(p_hxge_t);
121 static hxge_status_t hxge_alloc_rx_mem_pool(p_hxge_t);
122 static void hxge_free_rx_mem_pool(p_hxge_t);
123 static hxge_status_t hxge_alloc_tx_mem_pool(p_hxge_t);
124 static void hxge_free_tx_mem_pool(p_hxge_t);
125 static hxge_status_t hxge_dma_mem_alloc(p_hxge_t, dma_method_t,
126     struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
127     p_hxge_dma_common_t);
128 static void hxge_dma_mem_free(p_hxge_dma_common_t);
129 static hxge_status_t hxge_alloc_rx_buf_dma(p_hxge_t, uint16_t,
130     p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
131 static void hxge_free_rx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
132 static hxge_status_t hxge_alloc_rx_cntl_dma(p_hxge_t, uint16_t,
133     p_hxge_dma_common_t *, size_t);
134 static void hxge_free_rx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
135 static hxge_status_t hxge_alloc_tx_buf_dma(p_hxge_t, uint16_t,
136     p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
137 static void hxge_free_tx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
138 static hxge_status_t hxge_alloc_tx_cntl_dma(p_hxge_t, uint16_t,
139     p_hxge_dma_common_t *, size_t);
140 static void hxge_free_tx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
141 static int hxge_init_common_dev(p_hxge_t);
142 static void hxge_uninit_common_dev(p_hxge_t);
143 
144 /*
145  * The next declarations are for the GLDv3 interface.
146  */
147 static int hxge_m_start(void *);
148 static void hxge_m_stop(void *);
149 static int hxge_m_unicst(void *, const uint8_t *);
150 static int hxge_m_multicst(void *, boolean_t, const uint8_t *);
151 static int hxge_m_promisc(void *, boolean_t);
152 static void hxge_m_ioctl(void *, queue_t *, mblk_t *);
153 static void hxge_m_resources(void *);
154 static hxge_status_t hxge_mac_register(p_hxge_t hxgep);
155 
156 static int hxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
157 static int hxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);
158 static int hxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
159 static int hxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);
160 static boolean_t hxge_m_getcapab(void *, mac_capab_t, void *);
161 
162 #define	HXGE_MAGIC	0x4E584745UL
163 #define	MAX_DUMP_SZ 256
164 
165 #define	HXGE_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)
166 
167 extern mblk_t *hxge_m_tx(void *arg, mblk_t *mp);
168 extern hxge_status_t hxge_pfc_set_default_mac_addr(p_hxge_t hxgep);
169 
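/*
 * GLDv3 callback vector handed to the MAC layer (see hxge_mac_register()).
 * HXGE_M_CALLBACK_FLAGS advertises the optional entry points supplied in
 * this table: m_resources, m_ioctl and m_getcapab.
 */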
170 static mac_callbacks_t hxge_m_callbacks = {
171 	HXGE_M_CALLBACK_FLAGS,
172 	hxge_m_stat,
173 	hxge_m_start,
174 	hxge_m_stop,
175 	hxge_m_promisc,
176 	hxge_m_multicst,
177 	hxge_m_unicst,
178 	hxge_m_tx,
179 	hxge_m_resources,
180 	hxge_m_ioctl,
181 	hxge_m_getcapab
182 };
183 
184 /* Enable debug messages as necessary. */
185 uint64_t hxge_debug_level = 0x0;
186 
187 /*
188  * This list contains the instance structures for the Hydra
189  * devices present in the system. The lock exists to guarantee
190  * mutually exclusive access to the list.
191  */
192 void *hxge_list = NULL;
193 void *hxge_hw_list = NULL;
194 hxge_os_mutex_t hxge_common_lock;
195 
196 extern uint64_t hpi_debug_level;
197 
198 extern hxge_status_t hxge_ldgv_init();
199 extern hxge_status_t hxge_ldgv_uninit();
200 extern hxge_status_t hxge_intr_ldgv_init();
201 extern void hxge_fm_init(p_hxge_t hxgep, ddi_device_acc_attr_t *reg_attr,
202     ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr);
203 extern void hxge_fm_fini(p_hxge_t hxgep);
204 
205 /*
206  * Count of buffers in use by Hydra instances and loaned up to
207  * the upper layers.
208  */
209 uint32_t hxge_mblks_pending = 0;
210 
211 /*
212  * Device register access attributes for PIO.
213  */
214 static ddi_device_acc_attr_t hxge_dev_reg_acc_attr = {
215 	DDI_DEVICE_ATTR_V0,
216 	DDI_STRUCTURE_LE_ACC,
217 	DDI_STRICTORDER_ACC,
218 };
219 
220 /*
221  * Device descriptor access attributes for DMA.
222  */
223 static ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr = {
224 	DDI_DEVICE_ATTR_V0,
225 	DDI_STRUCTURE_LE_ACC,
226 	DDI_STRICTORDER_ACC
227 };
228 
229 /*
230  * Device buffer access attributes for DMA.
231  */
232 static ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr = {
233 	DDI_DEVICE_ATTR_V0,
234 	DDI_STRUCTURE_BE_ACC,
235 	DDI_STRICTORDER_ACC
236 };
237 
238 ddi_dma_attr_t hxge_desc_dma_attr = {
239 	DMA_ATTR_V0,		/* version number. */
240 	0,			/* low address */
241 	0xffffffffffffffff,	/* high address */
242 	0xffffffffffffffff,	/* address counter max */
243 	0x100000,		/* alignment */
244 	0xfc00fc,		/* dlim_burstsizes */
245 	0x1,			/* minimum transfer size */
246 	0xffffffffffffffff,	/* maximum transfer size */
247 	0xffffffffffffffff,	/* maximum segment size */
248 	1,			/* scatter/gather list length */
249 	(unsigned int)1,	/* granularity */
250 	0			/* attribute flags */
251 };
252 
253 ddi_dma_attr_t hxge_tx_dma_attr = {
254 	DMA_ATTR_V0,		/* version number. */
255 	0,			/* low address */
256 	0xffffffffffffffff,	/* high address */
257 	0xffffffffffffffff,	/* address counter max */
258 #if defined(_BIG_ENDIAN)
259 	0x2000,			/* alignment */
260 #else
261 	0x1000,			/* alignment */
262 #endif
263 	0xfc00fc,		/* dlim_burstsizes */
264 	0x1,			/* minimum transfer size */
265 	0xffffffffffffffff,	/* maximum transfer size */
266 	0xffffffffffffffff,	/* maximum segment size */
267 	5,			/* scatter/gather list length */
268 	(unsigned int)1,	/* granularity */
269 	0			/* attribute flags */
270 };
271 
272 ddi_dma_attr_t hxge_rx_dma_attr = {
273 	DMA_ATTR_V0,		/* version number. */
274 	0,			/* low address */
275 	0xffffffffffffffff,	/* high address */
276 	0xffffffffffffffff,	/* address counter max */
277 	0x10000,		/* alignment */
278 	0xfc00fc,		/* dlim_burstsizes */
279 	0x1,			/* minimum transfer size */
280 	0xffffffffffffffff,	/* maximum transfer size */
281 	0xffffffffffffffff,	/* maximum segment size */
282 	1,			/* scatter/gather list length */
283 	(unsigned int)1,	/* granularity */
284 	DDI_DMA_RELAXED_ORDERING /* attribute flags */
285 };
286 
287 ddi_dma_lim_t hxge_dma_limits = {
288 	(uint_t)0,		/* dlim_addr_lo */
289 	(uint_t)0xffffffff,	/* dlim_addr_hi */
290 	(uint_t)0xffffffff,	/* dlim_cntr_max */
291 	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
292 	0x1,			/* dlim_minxfer */
293 	1024			/* dlim_speed */
294 };
295 
296 dma_method_t hxge_force_dma = DVMA;
297 
298 /*
299  * DMA chunk sizes.
300  *
301  * Try to allocate the largest possible size
302  * so that fewer DMA chunks need to be managed.
303  */
304 size_t alloc_sizes[] = {
305     0x1000, 0x2000, 0x4000, 0x8000,
306     0x10000, 0x20000, 0x40000, 0x80000,
307     0x100000, 0x200000, 0x400000, 0x800000, 0x1000000
308 };
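/*
 * hxge_alloc_rx_buf_dma() and hxge_alloc_tx_buf_dma() walk this table:
 * they start at the first entry large enough to cover the requested size
 * (or the largest entry if none is) and fall back to smaller chunk sizes
 * whenever a DMA allocation fails, until the request is satisfied or the
 * per-channel HXGE_DMA_BLOCK chunk limit is reached.
 */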
309 
310 /*
311  * hxge_attach: driver attach(9E) entry point (handles DDI_ATTACH,
312  * DDI_RESUME and DDI_PM_RESUME).
312  */
313 static int
314 hxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
315 {
316 	p_hxge_t	hxgep = NULL;
317 	int		instance;
318 	int		status = DDI_SUCCESS;
319 
320 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_attach"));
321 
322 	/*
323 	 * Get the device instance since we'll need to setup or retrieve a soft
324 	 * state for this instance.
325 	 */
326 	instance = ddi_get_instance(dip);
327 
328 	switch (cmd) {
329 	case DDI_ATTACH:
330 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_ATTACH"));
331 		break;
332 
333 	case DDI_RESUME:
334 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_RESUME"));
335 		hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
336 		if (hxgep == NULL) {
337 			status = DDI_FAILURE;
338 			break;
339 		}
340 		if (hxgep->dip != dip) {
341 			status = DDI_FAILURE;
342 			break;
343 		}
344 		if (hxgep->suspended == DDI_PM_SUSPEND) {
345 			status = ddi_dev_is_needed(hxgep->dip, 0, 1);
346 		} else {
347 			(void) hxge_resume(hxgep);
348 		}
349 		goto hxge_attach_exit;
350 
351 	case DDI_PM_RESUME:
352 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_RESUME"));
353 		hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
354 		if (hxgep == NULL) {
355 			status = DDI_FAILURE;
356 			break;
357 		}
358 		if (hxgep->dip != dip) {
359 			status = DDI_FAILURE;
360 			break;
361 		}
362 		(void) hxge_resume(hxgep);
363 		goto hxge_attach_exit;
364 
365 	default:
366 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing unknown"));
367 		status = DDI_FAILURE;
368 		goto hxge_attach_exit;
369 	}
370 
371 	if (ddi_soft_state_zalloc(hxge_list, instance) == DDI_FAILURE) {
372 		status = DDI_FAILURE;
373 		HXGE_ERROR_MSG((hxgep, DDI_CTL,
374 		    "ddi_soft_state_zalloc failed"));
375 		goto hxge_attach_exit;
376 	}
377 
378 	hxgep = ddi_get_soft_state(hxge_list, instance);
379 	if (hxgep == NULL) {
380 		status = HXGE_ERROR;
381 		HXGE_ERROR_MSG((hxgep, DDI_CTL,
382 		    "ddi_get_soft_state failed"));
383 		goto hxge_attach_fail2;
384 	}
385 
386 	hxgep->drv_state = 0;
387 	hxgep->dip = dip;
388 	hxgep->instance = instance;
389 	hxgep->p_dip = ddi_get_parent(dip);
390 	hxgep->hxge_debug_level = hxge_debug_level;
391 	hpi_debug_level = hxge_debug_level;
392 
393 	hxge_fm_init(hxgep, &hxge_dev_reg_acc_attr, &hxge_dev_desc_dma_acc_attr,
394 	    &hxge_rx_dma_attr);
395 
396 	status = hxge_map_regs(hxgep);
397 	if (status != HXGE_OK) {
398 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_map_regs failed"));
399 		goto hxge_attach_fail3;
400 	}
401 
402 	status = hxge_init_common_dev(hxgep);
403 	if (status != HXGE_OK) {
404 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
405 		    "hxge_init_common_dev failed"));
406 		goto hxge_attach_fail4;
407 	}
408 
409 	/*
410 	 * Setup the Ndd parameters for this instance.
411 	 */
412 	hxge_init_param(hxgep);
413 
414 	/*
415 	 * Setup Register Tracing Buffer.
416 	 */
417 	hpi_rtrace_buf_init((rtrace_t *)&hpi_rtracebuf);
418 
419 	/* init stats ptr */
420 	hxge_init_statsp(hxgep);
421 
422 	status = hxge_get_config_properties(hxgep);
423 	if (status != HXGE_OK) {
424 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "get_config_properties failed"));
425 		goto hxge_attach_fail;
426 	}
427 
428 	/*
429 	 * Setup the Kstats for the driver.
430 	 */
431 	hxge_setup_kstats(hxgep);
432 	hxge_setup_param(hxgep);
433 
434 	status = hxge_setup_system_dma_pages(hxgep);
435 	if (status != HXGE_OK) {
436 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "set dma page failed"));
437 		goto hxge_attach_fail;
438 	}
439 
440 	hxge_hw_id_init(hxgep);
441 	hxge_hw_init_niu_common(hxgep);
442 
443 	status = hxge_setup_mutexes(hxgep);
444 	if (status != HXGE_OK) {
445 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set mutex failed"));
446 		goto hxge_attach_fail;
447 	}
448 
449 	status = hxge_setup_dev(hxgep);
450 	if (status != DDI_SUCCESS) {
451 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set dev failed"));
452 		goto hxge_attach_fail;
453 	}
454 
455 	status = hxge_add_intrs(hxgep);
456 	if (status != DDI_SUCCESS) {
457 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "add_intr failed"));
458 		goto hxge_attach_fail;
459 	}
460 
461 	status = hxge_add_soft_intrs(hxgep);
462 	if (status != DDI_SUCCESS) {
463 		HXGE_DEBUG_MSG((hxgep, HXGE_ERR_CTL, "add_soft_intr failed"));
464 		goto hxge_attach_fail;
465 	}
466 
467 	/*
468 	 * Enable interrupts.
469 	 */
470 	hxge_intrs_enable(hxgep);
471 
472 	if ((status = hxge_mac_register(hxgep)) != HXGE_OK) {
473 		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
474 		    "unable to register to mac layer (%d)", status));
475 		goto hxge_attach_fail;
476 	}
477 	mac_link_update(hxgep->mach, LINK_STATE_UP);
478 
479 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "registered to mac (instance %d)",
480 	    instance));
481 
482 	goto hxge_attach_exit;
483 
484 hxge_attach_fail:
485 	hxge_unattach(hxgep);
486 	goto hxge_attach_fail1;
487 
488 hxge_attach_fail5:
489 	/*
490 	 * Tear down the ndd parameters setup.
491 	 */
492 	hxge_destroy_param(hxgep);
493 
494 	/*
495 	 * Tear down the kstat setup.
496 	 */
497 	hxge_destroy_kstats(hxgep);
498 
499 hxge_attach_fail4:
500 	if (hxgep->hxge_hw_p) {
501 		hxge_uninit_common_dev(hxgep);
502 		hxgep->hxge_hw_p = NULL;
503 	}
504 hxge_attach_fail3:
505 	/*
506 	 * Unmap the register setup.
507 	 */
508 	hxge_unmap_regs(hxgep);
509 
510 	hxge_fm_fini(hxgep);
511 
512 hxge_attach_fail2:
513 	ddi_soft_state_free(hxge_list, hxgep->instance);
514 
515 hxge_attach_fail1:
516 	if (status != HXGE_OK)
517 		status = (HXGE_ERROR | HXGE_DDI_FAILED);
518 	hxgep = NULL;
519 
520 hxge_attach_exit:
521 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_attach status = 0x%08x",
522 	    status));
523 
524 	return (status);
525 }
526 
527 static int
528 hxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
529 {
530 	int		status = DDI_SUCCESS;
531 	int		instance;
532 	p_hxge_t	hxgep = NULL;
533 
534 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_detach"));
535 	instance = ddi_get_instance(dip);
536 	hxgep = ddi_get_soft_state(hxge_list, instance);
537 	if (hxgep == NULL) {
538 		status = DDI_FAILURE;
539 		goto hxge_detach_exit;
540 	}
541 
542 	switch (cmd) {
543 	case DDI_DETACH:
544 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_DETACH"));
545 		break;
546 
547 	case DDI_PM_SUSPEND:
548 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
549 		hxgep->suspended = DDI_PM_SUSPEND;
550 		hxge_suspend(hxgep);
551 		break;
552 
553 	case DDI_SUSPEND:
554 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_SUSPEND"));
555 		if (hxgep->suspended != DDI_PM_SUSPEND) {
556 			hxgep->suspended = DDI_SUSPEND;
557 			hxge_suspend(hxgep);
558 		}
559 		break;
560 
561 	default:
562 		status = DDI_FAILURE;
563 		break;
564 	}
565 
566 	if (cmd != DDI_DETACH)
567 		goto hxge_detach_exit;
568 
569 	/*
570 	 * Stop the xcvr polling.
571 	 */
572 	hxgep->suspended = cmd;
573 
574 	if (hxgep->mach && (status = mac_unregister(hxgep->mach)) != 0) {
575 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
576 		    "<== hxge_detach status = 0x%08X", status));
577 		return (DDI_FAILURE);
578 	}
579 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
580 	    "<== hxge_detach (mac_unregister) status = 0x%08X", status));
581 
582 	hxge_unattach(hxgep);
583 	hxgep = NULL;
584 
585 hxge_detach_exit:
586 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_detach status = 0x%08X",
587 	    status));
588 
589 	return (status);
590 }
591 
592 static void
593 hxge_unattach(p_hxge_t hxgep)
594 {
595 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unattach"));
596 
597 	if (hxgep == NULL || hxgep->dev_regs == NULL) {
598 		return;
599 	}
600 
601 	if (hxgep->hxge_hw_p) {
602 		hxge_uninit_common_dev(hxgep);
603 		hxgep->hxge_hw_p = NULL;
604 	}
605 
606 	if (hxgep->hxge_timerid) {
607 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
608 		hxgep->hxge_timerid = 0;
609 	}
610 
611 	/* Stop any further interrupts. */
612 	hxge_remove_intrs(hxgep);
613 
614 	/* Remove soft interrupts */
615 	hxge_remove_soft_intrs(hxgep);
616 
617 	/* Stop the device and free resources. */
618 	hxge_destroy_dev(hxgep);
619 
620 	/* Tear down the ndd parameters setup. */
621 	hxge_destroy_param(hxgep);
622 
623 	/* Tear down the kstat setup. */
624 	hxge_destroy_kstats(hxgep);
625 
626 	/* Destroy all mutexes.  */
627 	hxge_destroy_mutexes(hxgep);
628 
629 	/*
630 	 * Remove the list of ndd parameters which were setup during attach.
631 	 */
632 	if (hxgep->dip) {
633 		HXGE_DEBUG_MSG((hxgep, OBP_CTL,
634 		    " hxge_unattach: remove all properties"));
635 		(void) ddi_prop_remove_all(hxgep->dip);
636 	}
637 
638 	/*
639 	 * Unmap the register setup.
640 	 */
641 	hxge_unmap_regs(hxgep);
642 
643 	hxge_fm_fini(hxgep);
644 
645 	/*
646 	 * Free the soft state data structures allocated with this instance.
647 	 */
648 	ddi_soft_state_free(hxge_list, hxgep->instance);
649 
650 	HXGE_DEBUG_MSG((NULL, DDI_CTL, "<== hxge_unattach"));
651 }
652 
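/*
 * hxge_map_regs: map the three register sets exported by the device
 * (set 0: PCI config space, set 1: Hydra device registers accessed by PIO,
 * set 2: MSI/MSI-X registers) and plug the resulting access handles and
 * base addresses into the HPI handle macros.
 */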
653 static hxge_status_t
654 hxge_map_regs(p_hxge_t hxgep)
655 {
656 	int		ddi_status = DDI_SUCCESS;
657 	p_dev_regs_t	dev_regs;
658 
659 #ifdef	HXGE_DEBUG
660 	char		*sysname;
661 #endif
662 
663 	off_t		regsize;
664 	hxge_status_t	status = HXGE_OK;
665 	int		nregs;
666 
667 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_map_regs"));
668 
669 	if (ddi_dev_nregs(hxgep->dip, &nregs) != DDI_SUCCESS)
670 		return (HXGE_ERROR);
671 
672 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_regs: nregs: %d", nregs));
673 
674 	hxgep->dev_regs = NULL;
675 	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
676 	dev_regs->hxge_regh = NULL;
677 	dev_regs->hxge_pciregh = NULL;
678 	dev_regs->hxge_msix_regh = NULL;
679 
680 	(void) ddi_dev_regsize(hxgep->dip, 0, &regsize);
681 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
682 	    "hxge_map_regs: pci config size 0x%x", regsize));
683 
684 	ddi_status = ddi_regs_map_setup(hxgep->dip, 0,
685 	    (caddr_t *)&(dev_regs->hxge_pciregp), 0, 0,
686 	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_pciregh);
687 	if (ddi_status != DDI_SUCCESS) {
688 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
689 		    "ddi_map_regs, hxge bus config regs failed"));
690 		goto hxge_map_regs_fail0;
691 	}
692 
693 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
694 	    "hxge_map_reg: PCI config addr 0x%0llx handle 0x%0llx",
695 	    dev_regs->hxge_pciregp,
696 	    dev_regs->hxge_pciregh));
697 
698 	(void) ddi_dev_regsize(hxgep->dip, 1, &regsize);
699 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
700 	    "hxge_map_regs: pio size 0x%x", regsize));
701 
702 	/* set up the device mapped register */
703 	ddi_status = ddi_regs_map_setup(hxgep->dip, 1,
704 	    (caddr_t *)&(dev_regs->hxge_regp), 0, 0,
705 	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_regh);
706 
707 	if (ddi_status != DDI_SUCCESS) {
708 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
709 		    "ddi_map_regs for Hydra global reg failed"));
710 		goto hxge_map_regs_fail1;
711 	}
712 
713 	/* set up the msi/msi-x mapped register */
714 	(void) ddi_dev_regsize(hxgep->dip, 2, &regsize);
715 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
716 	    "hxge_map_regs: msix size 0x%x", regsize));
717 
718 	ddi_status = ddi_regs_map_setup(hxgep->dip, 2,
719 	    (caddr_t *)&(dev_regs->hxge_msix_regp), 0, 0,
720 	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_msix_regh);
721 
722 	if (ddi_status != DDI_SUCCESS) {
723 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
724 		    "ddi_map_regs for msi reg failed"));
725 		goto hxge_map_regs_fail2;
726 	}
727 
728 	hxgep->dev_regs = dev_regs;
729 
730 	HPI_PCI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_pciregh);
731 	HPI_PCI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_pciregp);
732 	HPI_MSI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_msix_regh);
733 	HPI_MSI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_msix_regp);
734 
735 	HPI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
736 	HPI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
737 
738 	HPI_REG_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
739 	HPI_REG_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
740 
741 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_reg: hardware addr 0x%0llx "
742 	    " handle 0x%0llx", dev_regs->hxge_regp, dev_regs->hxge_regh));
743 
744 	goto hxge_map_regs_exit;
745 
746 hxge_map_regs_fail3:
747 	if (dev_regs->hxge_msix_regh) {
748 		ddi_regs_map_free(&dev_regs->hxge_msix_regh);
749 	}
750 
751 hxge_map_regs_fail2:
752 	if (dev_regs->hxge_regh) {
753 		ddi_regs_map_free(&dev_regs->hxge_regh);
754 	}
755 
756 hxge_map_regs_fail1:
757 	if (dev_regs->hxge_pciregh) {
758 		ddi_regs_map_free(&dev_regs->hxge_pciregh);
759 	}
760 
761 hxge_map_regs_fail0:
762 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "Freeing register set memory"));
763 	kmem_free(dev_regs, sizeof (dev_regs_t));
764 
765 hxge_map_regs_exit:
766 	if (ddi_status != DDI_SUCCESS)
767 		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
768 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_map_regs"));
769 	return (status);
770 }
771 
772 static void
773 hxge_unmap_regs(p_hxge_t hxgep)
774 {
775 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unmap_regs"));
776 	if (hxgep->dev_regs) {
777 		if (hxgep->dev_regs->hxge_pciregh) {
778 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
779 			    "==> hxge_unmap_regs: bus"));
780 			ddi_regs_map_free(&hxgep->dev_regs->hxge_pciregh);
781 			hxgep->dev_regs->hxge_pciregh = NULL;
782 		}
783 
784 		if (hxgep->dev_regs->hxge_regh) {
785 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
786 			    "==> hxge_unmap_regs: device registers"));
787 			ddi_regs_map_free(&hxgep->dev_regs->hxge_regh);
788 			hxgep->dev_regs->hxge_regh = NULL;
789 		}
790 
791 		if (hxgep->dev_regs->hxge_msix_regh) {
792 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
793 			    "==> hxge_unmap_regs: device interrupts"));
794 			ddi_regs_map_free(&hxgep->dev_regs->hxge_msix_regh);
795 			hxgep->dev_regs->hxge_msix_regh = NULL;
796 		}
797 		kmem_free(hxgep->dev_regs, sizeof (dev_regs_t));
798 		hxgep->dev_regs = NULL;
799 	}
800 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_unmap_regs"));
801 }
802 
803 static hxge_status_t
804 hxge_setup_mutexes(p_hxge_t hxgep)
805 {
806 	int		ddi_status = DDI_SUCCESS;
807 	hxge_status_t	status = HXGE_OK;
808 
809 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_mutexes"));
810 
811 	/*
812 	 * Get the interrupt cookie so the mutexes can be initialized.
813 	 */
814 	ddi_status = ddi_get_iblock_cookie(hxgep->dip, 0,
815 	    &hxgep->interrupt_cookie);
816 
817 	if (ddi_status != DDI_SUCCESS) {
818 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
819 		    "<== hxge_setup_mutexes: failed 0x%x", ddi_status));
820 		goto hxge_setup_mutexes_exit;
821 	}
822 
823 	/*
824 	 * Initialize mutexes for this device.
825 	 */
826 	MUTEX_INIT(hxgep->genlock, NULL,
827 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
828 	MUTEX_INIT(&hxgep->ouraddr_lock, NULL,
829 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
830 	RW_INIT(&hxgep->filter_lock, NULL,
831 	    RW_DRIVER, (void *) hxgep->interrupt_cookie);
832 
833 hxge_setup_mutexes_exit:
834 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
835 	    "<== hxge_setup_mutexes status = %x", status));
836 
837 	if (ddi_status != DDI_SUCCESS)
838 		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
839 
840 	return (status);
841 }
842 
843 static void
844 hxge_destroy_mutexes(p_hxge_t hxgep)
845 {
846 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_mutexes"));
847 	RW_DESTROY(&hxgep->filter_lock);
848 	MUTEX_DESTROY(&hxgep->ouraddr_lock);
849 	MUTEX_DESTROY(hxgep->genlock);
850 
851 	if (hxge_debug_init == 1) {
852 		MUTEX_DESTROY(&hxgedebuglock);
853 		hxge_debug_init = 0;
854 	}
855 
856 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_mutexes"));
857 }
858 
859 hxge_status_t
860 hxge_init(p_hxge_t hxgep)
861 {
862 	hxge_status_t status = HXGE_OK;
863 
864 	HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_init"));
865 
866 	if (hxgep->drv_state & STATE_HW_INITIALIZED) {
867 		return (status);
868 	}
869 
870 	/*
871 	 * Allocate system memory for the receive/transmit buffer blocks and
872 	 * receive/transmit descriptor rings.
873 	 */
874 	status = hxge_alloc_mem_pool(hxgep);
875 	if (status != HXGE_OK) {
876 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "alloc mem failed\n"));
877 		goto hxge_init_fail1;
878 	}
879 
880 	/*
881 	 * Initialize and enable TXDMA channels.
882 	 */
883 	status = hxge_init_txdma_channels(hxgep);
884 	if (status != HXGE_OK) {
885 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init txdma failed\n"));
886 		goto hxge_init_fail3;
887 	}
888 
889 	/*
890 	 * Initialize and enable RXDMA channels.
891 	 */
892 	status = hxge_init_rxdma_channels(hxgep);
893 	if (status != HXGE_OK) {
894 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init rxdma failed\n"));
895 		goto hxge_init_fail4;
896 	}
897 
898 	/*
899 	 * Initialize TCAM
900 	 */
901 	status = hxge_classify_init(hxgep);
902 	if (status != HXGE_OK) {
903 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init classify failed\n"));
904 		goto hxge_init_fail5;
905 	}
906 
907 	/*
908 	 * Initialize the VMAC block.
909 	 */
910 	status = hxge_vmac_init(hxgep);
911 	if (status != HXGE_OK) {
912 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init MAC failed\n"));
913 		goto hxge_init_fail5;
914 	}
915 
916 	/* Bringup - this may be unnecessary when PXE and FCODE are available */
917 	status = hxge_pfc_set_default_mac_addr(hxgep);
918 	if (status != HXGE_OK) {
919 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
920 		    "Default Address Failure\n"));
921 		goto hxge_init_fail5;
922 	}
923 
924 	hxge_intrs_enable(hxgep);
925 
926 	/*
927 	 * Enable hardware interrupts.
928 	 */
929 	hxge_intr_hw_enable(hxgep);
930 	hxgep->drv_state |= STATE_HW_INITIALIZED;
931 
932 	goto hxge_init_exit;
933 
934 hxge_init_fail5:
935 	hxge_uninit_rxdma_channels(hxgep);
936 hxge_init_fail4:
937 	hxge_uninit_txdma_channels(hxgep);
938 hxge_init_fail3:
939 	hxge_free_mem_pool(hxgep);
940 hxge_init_fail1:
941 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
942 	    "<== hxge_init status (failed) = 0x%08x", status));
943 	return (status);
944 
945 hxge_init_exit:
946 
947 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_init status = 0x%08x",
948 	    status));
949 
950 	return (status);
951 }
952 
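/*
 * hxge_start_timer: arm a driver timeout of 'msec' milliseconds unless the
 * instance is suspended; returns the timeout id, or NULL when suspended.
 */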
953 timeout_id_t
954 hxge_start_timer(p_hxge_t hxgep, fptrv_t func, int msec)
955 {
956 	if ((hxgep->suspended == 0) || (hxgep->suspended == DDI_RESUME)) {
957 		return (timeout(func, (caddr_t)hxgep,
958 		    drv_usectohz(1000 * msec)));
959 	}
960 	return (NULL);
961 }
962 
963 /*ARGSUSED*/
964 void
965 hxge_stop_timer(p_hxge_t hxgep, timeout_id_t timerid)
966 {
967 	if (timerid) {
968 		(void) untimeout(timerid);
969 	}
970 }
971 
972 void
973 hxge_uninit(p_hxge_t hxgep)
974 {
975 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_uninit"));
976 
977 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
978 		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
979 		    "==> hxge_uninit: not initialized"));
980 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
981 		return;
982 	}
983 
984 	/* Stop timer */
985 	if (hxgep->hxge_timerid) {
986 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
987 		hxgep->hxge_timerid = 0;
988 	}
989 
990 	(void) hxge_intr_hw_disable(hxgep);
991 
992 	/* Reset the receive VMAC side.  */
993 	(void) hxge_rx_vmac_disable(hxgep);
994 
995 	/* Free classification resources */
996 	(void) hxge_classify_uninit(hxgep);
997 
998 	/* Reset the transmit/receive DMA side.  */
999 	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
1000 	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);
1001 
1002 	hxge_uninit_txdma_channels(hxgep);
1003 	hxge_uninit_rxdma_channels(hxgep);
1004 
1005 	/* Reset the transmit VMAC side.  */
1006 	(void) hxge_tx_vmac_disable(hxgep);
1007 
1008 	hxge_free_mem_pool(hxgep);
1009 
1010 	hxgep->drv_state &= ~STATE_HW_INITIALIZED;
1011 
1012 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
1013 }
1014 
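/*
 * hxge_get64: ioctl helper that reads one 64-bit device register.  The
 * register offset is taken from the start of the message block and the
 * value read is copied back into the same buffer.
 */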
1015 void
1016 hxge_get64(p_hxge_t hxgep, p_mblk_t mp)
1017 {
1018 #if defined(__i386)
1019 	size_t		reg;
1020 #else
1021 	uint64_t	reg;
1022 #endif
1023 	uint64_t	regdata;
1024 	int		i, retry;
1025 
1026 	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
1027 	regdata = 0;
1028 	retry = 1;
1029 
1030 	for (i = 0; i < retry; i++) {
1031 		HXGE_REG_RD64(hxgep->hpi_handle, reg, &regdata);
1032 	}
1033 	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
1034 }
1035 
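/*
 * hxge_put64: ioctl helper that writes one 64-bit device register.  The
 * message block carries two 64-bit words: the register offset followed by
 * the value to write.
 */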
1036 void
1037 hxge_put64(p_hxge_t hxgep, p_mblk_t mp)
1038 {
1039 #if defined(__i386)
1040 	size_t		reg;
1041 #else
1042 	uint64_t	reg;
1043 #endif
1044 	uint64_t	buf[2];
1045 
1046 	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
1047 #if defined(__i386)
1048 	reg = (size_t)buf[0];
1049 #else
1050 	reg = buf[0];
1051 #endif
1052 
1053 	HXGE_HPI_PIO_WRITE64(hxgep->hpi_handle, reg, buf[1]);
1054 }
1055 
1056 /*ARGSUSED*/
1057 /*VARARGS*/
1058 void
1059 hxge_debug_msg(p_hxge_t hxgep, uint64_t level, char *fmt, ...)
1060 {
1061 	char		msg_buffer[1048];
1062 	char		prefix_buffer[32];
1063 	int		instance;
1064 	uint64_t	debug_level;
1065 	int		cmn_level = CE_CONT;
1066 	va_list		ap;
1067 
1068 	debug_level = (hxgep == NULL) ? hxge_debug_level :
1069 	    hxgep->hxge_debug_level;
1070 
1071 	if ((level & debug_level) || (level == HXGE_NOTE) ||
1072 	    (level == HXGE_ERR_CTL)) {
1073 		/* do the msg processing */
1074 		if (hxge_debug_init == 0) {
1075 			MUTEX_INIT(&hxgedebuglock, NULL, MUTEX_DRIVER, NULL);
1076 			hxge_debug_init = 1;
1077 		}
1078 
1079 		MUTEX_ENTER(&hxgedebuglock);
1080 
1081 		if ((level & HXGE_NOTE)) {
1082 			cmn_level = CE_NOTE;
1083 		}
1084 
1085 		if (level & HXGE_ERR_CTL) {
1086 			cmn_level = CE_WARN;
1087 		}
1088 
1089 		va_start(ap, fmt);
1090 		(void) vsprintf(msg_buffer, fmt, ap);
1091 		va_end(ap);
1092 
1093 		if (hxgep == NULL) {
1094 			instance = -1;
1095 			(void) sprintf(prefix_buffer, "%s :", "hxge");
1096 		} else {
1097 			instance = hxgep->instance;
1098 			(void) sprintf(prefix_buffer,
1099 			    "%s%d :", "hxge", instance);
1100 		}
1101 
1102 		MUTEX_EXIT(&hxgedebuglock);
1103 		cmn_err(cmn_level, "%s %s\n", prefix_buffer, msg_buffer);
1104 	}
1105 }
1106 
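/*
 * hxge_dump_packet: format up to MAX_DUMP_SZ bytes at 'addr' as
 * colon-separated hex into a static buffer.  Larger buffers are dumped as
 * the first and last MAX_DUMP_SZ/2 bytes separated by a run of dots.
 * Not reentrant: the returned pointer references static storage.
 */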
1107 char *
1108 hxge_dump_packet(char *addr, int size)
1109 {
1110 	uchar_t		*ap = (uchar_t *)addr;
1111 	int		i;
1112 	static char	etherbuf[1024];
1113 	char		*cp = etherbuf;
1114 	char		digits[] = "0123456789abcdef";
1115 
1116 	if (!size)
1117 		size = 60;
1118 
1119 	if (size > MAX_DUMP_SZ) {
1120 		/* Dump the leading bytes */
1121 		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1122 			if (*ap > 0x0f)
1123 				*cp++ = digits[*ap >> 4];
1124 			*cp++ = digits[*ap++ & 0xf];
1125 			*cp++ = ':';
1126 		}
1127 		for (i = 0; i < 20; i++)
1128 			*cp++ = '.';
1129 		/* Dump the last MAX_DUMP_SZ/2 bytes */
1130 		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
1131 		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1132 			if (*ap > 0x0f)
1133 				*cp++ = digits[*ap >> 4];
1134 			*cp++ = digits[*ap++ & 0xf];
1135 			*cp++ = ':';
1136 		}
1137 	} else {
1138 		for (i = 0; i < size; i++) {
1139 			if (*ap > 0x0f)
1140 				*cp++ = digits[*ap >> 4];
1141 			*cp++ = digits[*ap++ & 0xf];
1142 			*cp++ = ':';
1143 		}
1144 	}
1145 	*--cp = 0;
1146 	return (etherbuf);
1147 }
1148 
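/*
 * hxge_suspend: quiesce the instance for DDI_SUSPEND/DDI_PM_SUSPEND by
 * disabling interrupts and stopping the hardware.
 */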
1149 static void
1150 hxge_suspend(p_hxge_t hxgep)
1151 {
1152 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_suspend"));
1153 
1154 	hxge_intrs_disable(hxgep);
1155 	hxge_destroy_dev(hxgep);
1156 
1157 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_suspend"));
1158 }
1159 
1160 static hxge_status_t
1161 hxge_resume(p_hxge_t hxgep)
1162 {
1163 	hxge_status_t status = HXGE_OK;
1164 
1165 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_resume"));
1166 	hxgep->suspended = DDI_RESUME;
1167 
1168 	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_START);
1169 	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_START);
1170 
1171 	(void) hxge_rx_vmac_enable(hxgep);
1172 	(void) hxge_tx_vmac_enable(hxgep);
1173 
1174 	hxge_intrs_enable(hxgep);
1175 
1176 	hxgep->suspended = 0;
1177 
1178 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1179 	    "<== hxge_resume status = 0x%x", status));
1180 
1181 	return (status);
1182 }
1183 
1184 hxge_status_t
1185 hxge_setup_dev(p_hxge_t hxgep)
1186 {
1187 	hxge_status_t status = HXGE_OK;
1188 
1189 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_dev"));
1190 
1191 	status = hxge_link_init(hxgep);
1192 	if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) {
1193 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1194 		    "Bad register acc handle"));
1195 		status = HXGE_ERROR;
1196 	}
1197 
1198 	if (status != HXGE_OK) {
1199 		HXGE_DEBUG_MSG((hxgep, MAC_CTL,
1200 		    " hxge_setup_dev status (link init 0x%08x)", status));
1201 		goto hxge_setup_dev_exit;
1202 	}
1203 
1204 hxge_setup_dev_exit:
1205 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1206 	    "<== hxge_setup_dev status = 0x%08x", status));
1207 
1208 	return (status);
1209 }
1210 
1211 static void
1212 hxge_destroy_dev(p_hxge_t hxgep)
1213 {
1214 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_dev"));
1215 
1216 	(void) hxge_hw_stop(hxgep);
1217 
1218 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_dev"));
1219 }
1220 
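/*
 * hxge_setup_system_dma_pages: derive the system page size (clamped to the
 * IOMMU page size and to the 8K maximum Hydra supports), pick the matching
 * receive block size code, propagate the alignment into the DMA attribute
 * templates, and probe the system DMA burst sizes by binding a throwaway
 * DMA handle.
 */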
1221 static hxge_status_t
1222 hxge_setup_system_dma_pages(p_hxge_t hxgep)
1223 {
1224 	int			ddi_status = DDI_SUCCESS;
1225 	uint_t			count;
1226 	ddi_dma_cookie_t	cookie;
1227 	uint_t			iommu_pagesize;
1228 	hxge_status_t		status = HXGE_OK;
1229 
1230 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_system_dma_pages"));
1231 
1232 	hxgep->sys_page_sz = ddi_ptob(hxgep->dip, (ulong_t)1);
1233 	iommu_pagesize = dvma_pagesize(hxgep->dip);
1234 
1235 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1236 	    " hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1237 	    " default_block_size %d iommu_pagesize %d",
1238 	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1239 	    hxgep->rx_default_block_size, iommu_pagesize));
1240 
1241 	if (iommu_pagesize != 0) {
1242 		if (hxgep->sys_page_sz == iommu_pagesize) {
1243 			/* Hydra supports up to 8K pages */
1244 			if (iommu_pagesize > 0x2000)
1245 				hxgep->sys_page_sz = 0x2000;
1246 		} else {
1247 			if (hxgep->sys_page_sz > iommu_pagesize)
1248 				hxgep->sys_page_sz = iommu_pagesize;
1249 		}
1250 	}
1251 
1252 	hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1253 
1254 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1255 	    "==> hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1256 	    "default_block_size %d page mask %d",
1257 	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1258 	    hxgep->rx_default_block_size, hxgep->sys_page_mask));
1259 
1260 	switch (hxgep->sys_page_sz) {
1261 	default:
1262 		hxgep->sys_page_sz = 0x1000;
1263 		hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1264 		hxgep->rx_default_block_size = 0x1000;
1265 		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1266 		break;
1267 	case 0x1000:
1268 		hxgep->rx_default_block_size = 0x1000;
1269 		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1270 		break;
1271 	case 0x2000:
1272 		hxgep->rx_default_block_size = 0x2000;
1273 		hxgep->rx_bksize_code = RBR_BKSIZE_8K;
1274 		break;
1275 	}
1276 
1277 	hxge_rx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1278 	hxge_tx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1279 	hxge_desc_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1280 
1281 	/*
1282 	 * Get the system DMA burst size.
1283 	 */
1284 	ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr,
1285 	    DDI_DMA_DONTWAIT, 0, &hxgep->dmasparehandle);
1286 	if (ddi_status != DDI_SUCCESS) {
1287 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1288 		    "ddi_dma_alloc_handle: failed status 0x%x", ddi_status));
1289 		goto hxge_get_soft_properties_exit;
1290 	}
1291 
1292 	ddi_status = ddi_dma_addr_bind_handle(hxgep->dmasparehandle, NULL,
1293 	    (caddr_t)hxgep->dmasparehandle, sizeof (hxgep->dmasparehandle),
1294 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
1295 	    &cookie, &count);
1296 	if (ddi_status != DDI_DMA_MAPPED) {
1297 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1298 		    "Binding spare handle to find system burstsize failed."));
1299 		ddi_status = DDI_FAILURE;
1300 		goto hxge_get_soft_properties_fail1;
1301 	}
1302 
1303 	hxgep->sys_burst_sz = ddi_dma_burstsizes(hxgep->dmasparehandle);
1304 	(void) ddi_dma_unbind_handle(hxgep->dmasparehandle);
1305 
1306 hxge_get_soft_properties_fail1:
1307 	ddi_dma_free_handle(&hxgep->dmasparehandle);
1308 
1309 hxge_get_soft_properties_exit:
1310 
1311 	if (ddi_status != DDI_SUCCESS)
1312 		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
1313 
1314 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1315 	    "<== hxge_setup_system_dma_pages status = 0x%08x", status));
1316 
1317 	return (status);
1318 }
1319 
1320 hxge_status_t
1321 hxge_alloc_mem_pool(p_hxge_t hxgep)
1322 {
1323 	hxge_status_t status = HXGE_OK;
1324 
1325 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_alloc_mem_pool"));
1326 
1327 	status = hxge_alloc_rx_mem_pool(hxgep);
1328 	if (status != HXGE_OK) {
1329 		return (HXGE_ERROR);
1330 	}
1331 
1332 	status = hxge_alloc_tx_mem_pool(hxgep);
1333 	if (status != HXGE_OK) {
1334 		hxge_free_rx_mem_pool(hxgep);
1335 		return (HXGE_ERROR);
1336 	}
1337 
1338 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_alloc_mem_pool"));
1339 	return (HXGE_OK);
1340 }
1341 
1342 static void
1343 hxge_free_mem_pool(p_hxge_t hxgep)
1344 {
1345 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_mem_pool"));
1346 
1347 	hxge_free_rx_mem_pool(hxgep);
1348 	hxge_free_tx_mem_pool(hxgep);
1349 
1350 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_mem_pool"));
1351 }
1352 
1353 static hxge_status_t
1354 hxge_alloc_rx_mem_pool(p_hxge_t hxgep)
1355 {
1356 	int			i, j;
1357 	uint32_t		ndmas, st_rdc;
1358 	p_hxge_dma_pt_cfg_t	p_all_cfgp;
1359 	p_hxge_hw_pt_cfg_t	p_cfgp;
1360 	p_hxge_dma_pool_t	dma_poolp;
1361 	p_hxge_dma_common_t	*dma_buf_p;
1362 	p_hxge_dma_pool_t	dma_cntl_poolp;
1363 	p_hxge_dma_common_t	*dma_cntl_p;
1364 	size_t			rx_buf_alloc_size;
1365 	size_t			rx_cntl_alloc_size;
1366 	uint32_t		*num_chunks;	/* per dma */
1367 	hxge_status_t		status = HXGE_OK;
1368 
1369 	uint32_t		hxge_port_rbr_size;
1370 	uint32_t		hxge_port_rbr_spare_size;
1371 	uint32_t		hxge_port_rcr_size;
1372 
1373 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_mem_pool"));
1374 
1375 	p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1376 	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1377 	st_rdc = p_cfgp->start_rdc;
1378 	ndmas = p_cfgp->max_rdcs;
1379 
1380 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1381 	    " hxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));
1382 
1383 	/*
1384 	 * Allocate memory for each receive DMA channel.
1385 	 */
1386 	dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1387 	    KM_SLEEP);
1388 	dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1389 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1390 
1391 	dma_cntl_poolp = (p_hxge_dma_pool_t)
1392 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1393 	dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1394 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1395 
1396 	num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
1397 	    KM_SLEEP);
1398 
1399 	/*
1400 	 * Assume that each DMA channel will be configured with default block
1401 	 * size. RBR block counts must be a multiple of the batch count (16).
1402 	 */
1403 	hxge_port_rbr_size = p_all_cfgp->rbr_size;
1404 	hxge_port_rcr_size = p_all_cfgp->rcr_size;
1405 
1406 	if (!hxge_port_rbr_size) {
1407 		hxge_port_rbr_size = HXGE_RBR_RBB_DEFAULT;
1408 	}
1409 
1410 	if (hxge_port_rbr_size % HXGE_RXDMA_POST_BATCH) {
1411 		hxge_port_rbr_size = (HXGE_RXDMA_POST_BATCH *
1412 		    (hxge_port_rbr_size / HXGE_RXDMA_POST_BATCH + 1));
1413 	}
1414 
1415 	p_all_cfgp->rbr_size = hxge_port_rbr_size;
1416 	hxge_port_rbr_spare_size = hxge_rbr_spare_size;
1417 
1418 	if (hxge_port_rbr_spare_size % HXGE_RXDMA_POST_BATCH) {
1419 		hxge_port_rbr_spare_size = (HXGE_RXDMA_POST_BATCH *
1420 		    (hxge_port_rbr_spare_size / HXGE_RXDMA_POST_BATCH + 1));
1421 	}
1422 
1423 	rx_buf_alloc_size = (hxgep->rx_default_block_size *
1424 	    (hxge_port_rbr_size + hxge_port_rbr_spare_size));
1425 
1426 	/*
1427 	 * Addresses of the receive block ring, receive completion ring and the
1428 	 * mailbox must all be cache-aligned (64 bytes).
1429 	 */
1430 	rx_cntl_alloc_size = hxge_port_rbr_size + hxge_port_rbr_spare_size;
1431 	rx_cntl_alloc_size *= (sizeof (rx_desc_t));
1432 	rx_cntl_alloc_size += (sizeof (rcr_entry_t) * hxge_port_rcr_size);
1433 	rx_cntl_alloc_size += sizeof (rxdma_mailbox_t);
1434 
1435 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_alloc_rx_mem_pool: "
1436 	    "hxge_port_rbr_size = %d hxge_port_rbr_spare_size = %d "
1437 	    "hxge_port_rcr_size = %d rx_cntl_alloc_size = %d",
1438 	    hxge_port_rbr_size, hxge_port_rbr_spare_size,
1439 	    hxge_port_rcr_size, rx_cntl_alloc_size));
1440 
1441 	hxgep->hxge_port_rbr_size = hxge_port_rbr_size;
1442 	hxgep->hxge_port_rcr_size = hxge_port_rcr_size;
1443 
1444 	/*
1445 	 * Allocate memory for receive buffers and descriptor rings. Replace
1446 	 * allocation functions with interface functions provided by the
1447 	 * partition manager when it is available.
1448 	 */
1449 	/*
1450 	 * Allocate memory for the receive buffer blocks.
1451 	 */
1452 	for (i = 0; i < ndmas; i++) {
1453 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1454 		    " hxge_alloc_rx_mem_pool to alloc mem: "
1455 		    " dma %d dma_buf_p %llx &dma_buf_p %llx",
1456 		    i, dma_buf_p[i], &dma_buf_p[i]));
1457 
1458 		num_chunks[i] = 0;
1459 
1460 		status = hxge_alloc_rx_buf_dma(hxgep, st_rdc, &dma_buf_p[i],
1461 		    rx_buf_alloc_size, hxgep->rx_default_block_size,
1462 		    &num_chunks[i]);
1463 		if (status != HXGE_OK) {
1464 			break;
1465 		}
1466 
1467 		st_rdc++;
1468 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1469 		    " hxge_alloc_rx_mem_pool DONE  alloc mem: "
1470 		    "dma %d dma_buf_p %llx &dma_buf_p %llx", i,
1471 		    dma_buf_p[i], &dma_buf_p[i]));
1472 	}
1473 
1474 	if (i < ndmas) {
1475 		goto hxge_alloc_rx_mem_fail1;
1476 	}
1477 
1478 	/*
1479 	 * Allocate memory for descriptor rings and mailbox.
1480 	 */
1481 	st_rdc = p_cfgp->start_rdc;
1482 	for (j = 0; j < ndmas; j++) {
1483 		status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc, &dma_cntl_p[j],
1484 		    rx_cntl_alloc_size);
1485 		if (status != HXGE_OK) {
1486 			break;
1487 		}
1488 		st_rdc++;
1489 	}
1490 
1491 	if (j < ndmas) {
1492 		goto hxge_alloc_rx_mem_fail2;
1493 	}
1494 
1495 	dma_poolp->ndmas = ndmas;
1496 	dma_poolp->num_chunks = num_chunks;
1497 	dma_poolp->buf_allocated = B_TRUE;
1498 	hxgep->rx_buf_pool_p = dma_poolp;
1499 	dma_poolp->dma_buf_pool_p = dma_buf_p;
1500 
1501 	dma_cntl_poolp->ndmas = ndmas;
1502 	dma_cntl_poolp->buf_allocated = B_TRUE;
1503 	hxgep->rx_cntl_pool_p = dma_cntl_poolp;
1504 	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
1505 
1506 	goto hxge_alloc_rx_mem_pool_exit;
1507 
1508 hxge_alloc_rx_mem_fail2:
1509 	/* Free control buffers */
1510 	j--;
1511 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1512 	    "==> hxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
1513 	for (; j >= 0; j--) {
1514 		hxge_free_rx_cntl_dma(hxgep,
1515 		    (p_hxge_dma_common_t)dma_cntl_p[j]);
1516 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1517 		    "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1518 	}
1519 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1520 	    "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1521 
1522 hxge_alloc_rx_mem_fail1:
1523 	/* Free data buffers */
1524 	i--;
1525 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1526 	    "==> hxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
1527 	for (; i >= 0; i--) {
1528 		hxge_free_rx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
1529 		    num_chunks[i]);
1530 	}
1531 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1532 	    "==> hxge_alloc_rx_mem_pool: data bufs freed (%d)", i));
1533 
1534 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1535 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1536 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1537 	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
1538 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1539 
1540 hxge_alloc_rx_mem_pool_exit:
1541 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1542 	    "<== hxge_alloc_rx_mem_pool:status 0x%08x", status));
1543 
1544 	return (status);
1545 }
1546 
1547 static void
1548 hxge_free_rx_mem_pool(p_hxge_t hxgep)
1549 {
1550 	uint32_t		i, ndmas;
1551 	p_hxge_dma_pool_t	dma_poolp;
1552 	p_hxge_dma_common_t	*dma_buf_p;
1553 	p_hxge_dma_pool_t	dma_cntl_poolp;
1554 	p_hxge_dma_common_t	*dma_cntl_p;
1555 	uint32_t		*num_chunks;
1556 
1557 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_free_rx_mem_pool"));
1558 
1559 	dma_poolp = hxgep->rx_buf_pool_p;
1560 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
1561 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool "
1562 		    "(null rx buf pool or buf not allocated"));
1563 		return;
1564 	}
1565 
1566 	dma_cntl_poolp = hxgep->rx_cntl_pool_p;
1567 	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
1568 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1569 		    "<== hxge_free_rx_mem_pool "
1570 		    "(null rx cntl buf pool or cntl buf not allocated"));
1571 		return;
1572 	}
1573 
1574 	dma_buf_p = dma_poolp->dma_buf_pool_p;
1575 	num_chunks = dma_poolp->num_chunks;
1576 
1577 	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
1578 	ndmas = dma_cntl_poolp->ndmas;
1579 
1580 	for (i = 0; i < ndmas; i++) {
1581 		hxge_free_rx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
1582 	}
1583 
1584 	for (i = 0; i < ndmas; i++) {
1585 		hxge_free_rx_cntl_dma(hxgep, dma_cntl_p[i]);
1586 	}
1587 
1588 	for (i = 0; i < ndmas; i++) {
1589 		KMEM_FREE(dma_buf_p[i],
1590 		    sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1591 		KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t));
1592 	}
1593 
1594 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1595 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1596 	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
1597 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1598 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1599 
1600 	hxgep->rx_buf_pool_p = NULL;
1601 	hxgep->rx_cntl_pool_p = NULL;
1602 
1603 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool"));
1604 }
1605 
1606 static hxge_status_t
1607 hxge_alloc_rx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
1608     p_hxge_dma_common_t *dmap,
1609     size_t alloc_size, size_t block_size, uint32_t *num_chunks)
1610 {
1611 	p_hxge_dma_common_t	rx_dmap;
1612 	hxge_status_t		status = HXGE_OK;
1613 	size_t			total_alloc_size;
1614 	size_t			allocated = 0;
1615 	int			i, size_index, array_size;
1616 
1617 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_buf_dma"));
1618 
1619 	rx_dmap = (p_hxge_dma_common_t)
1620 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
1621 
1622 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1623 	    " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
1624 	    dma_channel, alloc_size, block_size, dmap));
1625 
1626 	total_alloc_size = alloc_size;
1627 
1628 	i = 0;
1629 	size_index = 0;
1630 	array_size = sizeof (alloc_sizes) / sizeof (size_t);
1631 	while ((alloc_sizes[size_index] < alloc_size) &&
1632 	    (size_index < array_size))
1633 		size_index++;
1634 	if (size_index >= array_size) {
1635 		size_index = array_size - 1;
1636 	}
1637 
1638 	while ((allocated < total_alloc_size) &&
1639 	    (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
1640 		rx_dmap[i].dma_chunk_index = i;
1641 		rx_dmap[i].block_size = block_size;
1642 		rx_dmap[i].alength = alloc_sizes[size_index];
1643 		rx_dmap[i].orig_alength = rx_dmap[i].alength;
1644 		rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
1645 		rx_dmap[i].dma_channel = dma_channel;
1646 		rx_dmap[i].contig_alloc_type = B_FALSE;
1647 
1648 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1649 		    "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
1650 		    "i %d nblocks %d alength %d",
1651 		    dma_channel, i, &rx_dmap[i], block_size,
1652 		    i, rx_dmap[i].nblocks, rx_dmap[i].alength));
1653 		status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1654 		    &hxge_rx_dma_attr, rx_dmap[i].alength,
1655 		    &hxge_dev_buf_dma_acc_attr,
1656 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1657 		    (p_hxge_dma_common_t)(&rx_dmap[i]));
1658 		if (status != HXGE_OK) {
1659 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1660 			    " hxge_alloc_rx_buf_dma: Alloc Failed: "
1661 			    " for size: %d", alloc_sizes[size_index]));
1662 			size_index--;
1663 		} else {
1664 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1665 			    " alloc_rx_buf_dma allocated rdc %d "
1666 			    "chunk %d size %x dvma %x bufp %llx ",
1667 			    dma_channel, i, rx_dmap[i].alength,
1668 			    rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
1669 			i++;
1670 			allocated += alloc_sizes[size_index];
1671 		}
1672 	}
1673 
1674 	if (allocated < total_alloc_size) {
1675 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1676 		    " hxge_alloc_rx_buf_dma failed due to"
1677 		    " allocated(%d) < required(%d)",
1678 		    allocated, total_alloc_size));
1679 		goto hxge_alloc_rx_mem_fail1;
1680 	}
1681 
1682 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1683 	    " alloc_rx_buf_dma rdc %d allocated %d chunks", dma_channel, i));
1684 
1685 	*num_chunks = i;
1686 	*dmap = rx_dmap;
1687 
1688 	goto hxge_alloc_rx_mem_exit;
1689 
1690 hxge_alloc_rx_mem_fail1:
1691 	KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1692 
1693 hxge_alloc_rx_mem_exit:
1694 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1695 	    "<== hxge_alloc_rx_buf_dma status 0x%08x", status));
1696 
1697 	return (status);
1698 }
1699 
1700 /*ARGSUSED*/
1701 static void
1702 hxge_free_rx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
1703     uint32_t num_chunks)
1704 {
1705 	int i;
1706 
1707 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1708 	    "==> hxge_free_rx_buf_dma: # of chunks %d", num_chunks));
1709 
1710 	for (i = 0; i < num_chunks; i++) {
1711 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1712 		    "==> hxge_free_rx_buf_dma: chunk %d dmap 0x%llx", i, dmap));
1713 		hxge_dma_mem_free(dmap++);
1714 	}
1715 
1716 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_buf_dma"));
1717 }
1718 
1719 /*ARGSUSED*/
1720 static hxge_status_t
1721 hxge_alloc_rx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
1722     p_hxge_dma_common_t *dmap, size_t size)
1723 {
1724 	p_hxge_dma_common_t	rx_dmap;
1725 	hxge_status_t		status = HXGE_OK;
1726 
1727 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_cntl_dma"));
1728 
1729 	rx_dmap = (p_hxge_dma_common_t)
1730 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t), KM_SLEEP);
1731 
1732 	rx_dmap->contig_alloc_type = B_FALSE;
1733 
1734 	status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1735 	    &hxge_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr,
1736 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, rx_dmap);
1737 	if (status != HXGE_OK) {
1738 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1739 		    " hxge_alloc_rx_cntl_dma: Alloc Failed: "
1740 		    " for size: %d", size));
1741 		goto hxge_alloc_rx_cntl_dma_fail1;
1742 	}
1743 
1744 	*dmap = rx_dmap;
1745 
1746 	goto hxge_alloc_rx_cntl_dma_exit;
1747 
1748 hxge_alloc_rx_cntl_dma_fail1:
1749 	KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t));
1750 
1751 hxge_alloc_rx_cntl_dma_exit:
1752 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1753 	    "<== hxge_alloc_rx_cntl_dma status 0x%08x", status));
1754 
1755 	return (status);
1756 }
1757 
1758 /*ARGSUSED*/
1759 static void
1760 hxge_free_rx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
1761 {
1762 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_rx_cntl_dma"));
1763 
1764 	hxge_dma_mem_free(dmap);
1765 
1766 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_rx_cntl_dma"));
1767 }
1768 
1769 static hxge_status_t
1770 hxge_alloc_tx_mem_pool(p_hxge_t hxgep)
1771 {
1772 	hxge_status_t		status = HXGE_OK;
1773 	int			i, j;
1774 	uint32_t		ndmas, st_tdc;
1775 	p_hxge_dma_pt_cfg_t	p_all_cfgp;
1776 	p_hxge_hw_pt_cfg_t	p_cfgp;
1777 	p_hxge_dma_pool_t	dma_poolp;
1778 	p_hxge_dma_common_t	*dma_buf_p;
1779 	p_hxge_dma_pool_t	dma_cntl_poolp;
1780 	p_hxge_dma_common_t	*dma_cntl_p;
1781 	size_t			tx_buf_alloc_size;
1782 	size_t			tx_cntl_alloc_size;
1783 	uint32_t		*num_chunks;	/* per dma */
1784 
1785 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool"));
1786 
1787 	p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1788 	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1789 	st_tdc = p_cfgp->start_tdc;
1790 	ndmas = p_cfgp->max_tdcs;
1791 
1792 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool: "
1793 	    "p_cfgp 0x%016llx start_tdc %d ndmas %d hxgep->max_tdcs %d",
1794 	    p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, hxgep->max_tdcs));
1795 	/*
1796 	 * Allocate memory for each transmit DMA channel.
1797 	 */
1798 	dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1799 	    KM_SLEEP);
1800 	dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1801 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1802 
1803 	dma_cntl_poolp = (p_hxge_dma_pool_t)
1804 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1805 	dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1806 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1807 
1808 	hxgep->hxge_port_tx_ring_size = hxge_tx_ring_size;
1809 
1810 	/*
1811 	 * Assume that each DMA channel will be configured with default
1812 	 * transmit buffer size for copying transmit data. (For packet payload
1813 	 * over this limit, packets will not be copied.)
1814 	 */
1815 	tx_buf_alloc_size = (hxge_bcopy_thresh * hxge_tx_ring_size);
1816 
1817 	/*
1818 	 * Addresses of the transmit descriptor ring and the mailbox must all be
1819 	 * cache-aligned (64 bytes).
1820 	 */
1821 	tx_cntl_alloc_size = hxge_tx_ring_size;
1822 	tx_cntl_alloc_size *= (sizeof (tx_desc_t));
1823 	tx_cntl_alloc_size += sizeof (txdma_mailbox_t);
1824 
1825 	num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
1826 	    KM_SLEEP);
1827 
1828 	/*
1829 	 * Allocate memory for transmit buffers and descriptor rings. Replace
1830 	 * allocation functions with interface functions provided by the
1831 	 * partition manager when it is available.
1832 	 *
1833 	 * Allocate memory for the transmit buffer pool.
1834 	 */
1835 	for (i = 0; i < ndmas; i++) {
1836 		num_chunks[i] = 0;
1837 		status = hxge_alloc_tx_buf_dma(hxgep, st_tdc, &dma_buf_p[i],
1838 		    tx_buf_alloc_size, hxge_bcopy_thresh, &num_chunks[i]);
1839 		if (status != HXGE_OK) {
1840 			break;
1841 		}
1842 		st_tdc++;
1843 	}
1844 
1845 	if (i < ndmas) {
1846 		goto hxge_alloc_tx_mem_pool_fail1;
1847 	}
1848 
1849 	st_tdc = p_cfgp->start_tdc;
1850 
1851 	/*
1852 	 * Allocate memory for descriptor rings and mailbox.
1853 	 */
1854 	for (j = 0; j < ndmas; j++) {
1855 		status = hxge_alloc_tx_cntl_dma(hxgep, st_tdc, &dma_cntl_p[j],
1856 		    tx_cntl_alloc_size);
1857 		if (status != HXGE_OK) {
1858 			break;
1859 		}
1860 		st_tdc++;
1861 	}
1862 
1863 	if (j < ndmas) {
1864 		goto hxge_alloc_tx_mem_pool_fail2;
1865 	}
1866 
1867 	dma_poolp->ndmas = ndmas;
1868 	dma_poolp->num_chunks = num_chunks;
1869 	dma_poolp->buf_allocated = B_TRUE;
1870 	dma_poolp->dma_buf_pool_p = dma_buf_p;
1871 	hxgep->tx_buf_pool_p = dma_poolp;
1872 
1873 	dma_cntl_poolp->ndmas = ndmas;
1874 	dma_cntl_poolp->buf_allocated = B_TRUE;
1875 	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
1876 	hxgep->tx_cntl_pool_p = dma_cntl_poolp;
1877 
1878 	HXGE_DEBUG_MSG((hxgep, MEM_CTL,
1879 	    "==> hxge_alloc_tx_mem_pool: start_tdc %d "
1880 	    "ndmas %d poolp->ndmas %d", st_tdc, ndmas, dma_poolp->ndmas));
1881 
1882 	goto hxge_alloc_tx_mem_pool_exit;
1883 
1884 hxge_alloc_tx_mem_pool_fail2:
1885 	/* Free control buffers */
1886 	j--;
1887 	for (; j >= 0; j--) {
1888 		hxge_free_tx_cntl_dma(hxgep,
1889 		    (p_hxge_dma_common_t)dma_cntl_p[j]);
1890 	}
1891 
1892 hxge_alloc_tx_mem_pool_fail1:
1893 	/* Free data buffers */
1894 	i--;
1895 	for (; i >= 0; i--) {
1896 		hxge_free_tx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
1897 		    num_chunks[i]);
1898 	}
1899 
1900 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1901 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1902 	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
1903 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1904 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1905 
1906 hxge_alloc_tx_mem_pool_exit:
1907 	HXGE_DEBUG_MSG((hxgep, MEM_CTL,
1908 	    "<== hxge_alloc_tx_mem_pool:status 0x%08x", status));
1909 
1910 	return (status);
1911 }
1912 
1913 static hxge_status_t
1914 hxge_alloc_tx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
1915     p_hxge_dma_common_t *dmap, size_t alloc_size,
1916     size_t block_size, uint32_t *num_chunks)
1917 {
1918 	p_hxge_dma_common_t	tx_dmap;
1919 	hxge_status_t		status = HXGE_OK;
1920 	size_t			total_alloc_size;
1921 	size_t			allocated = 0;
1922 	int			i, size_index, array_size;
1923 
1924 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_buf_dma"));
1925 
1926 	tx_dmap = (p_hxge_dma_common_t)
1927 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
1928 
1929 	total_alloc_size = alloc_size;
1930 	i = 0;
1931 	size_index = 0;
1932 	array_size = sizeof (alloc_sizes) / sizeof (size_t);
1933 	while ((size_index < array_size) &&
1934 	    (alloc_sizes[size_index] < alloc_size))
1935 		size_index++;
1936 	if (size_index >= array_size) {
1937 		size_index = array_size - 1;
1938 	}
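	/*
	 * Allocation strategy for the loop below: carve the buffer pool
	 * into at most HXGE_DMA_BLOCK chunks.  Start with the smallest
	 * entry in alloc_sizes[] that covers the request; whenever an
	 * allocation fails, drop to the next smaller size and keep going
	 * until the requested total has been satisfied or no smaller
	 * size remains.
	 */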
1939 
1940 	while ((allocated < total_alloc_size) &&
1941 	    (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
1942 		tx_dmap[i].dma_chunk_index = i;
1943 		tx_dmap[i].block_size = block_size;
1944 		tx_dmap[i].alength = alloc_sizes[size_index];
1945 		tx_dmap[i].orig_alength = tx_dmap[i].alength;
1946 		tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
1947 		tx_dmap[i].dma_channel = dma_channel;
1948 		tx_dmap[i].contig_alloc_type = B_FALSE;
1949 
1950 		status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1951 		    &hxge_tx_dma_attr, tx_dmap[i].alength,
1952 		    &hxge_dev_buf_dma_acc_attr,
1953 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
1954 		    (p_hxge_dma_common_t)(&tx_dmap[i]));
1955 		if (status != HXGE_OK) {
1956 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1957 			    " hxge_alloc_tx_buf_dma: Alloc Failed: "
1958 			    " for size: %d", alloc_sizes[size_index]));
1959 			size_index--;
1960 		} else {
1961 			i++;
1962 			allocated += alloc_sizes[size_index];
1963 		}
1964 	}
1965 
1966 	if (allocated < total_alloc_size) {
1967 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1968 		    " hxge_alloc_tx_buf_dma: failed due to"
1969 		    " allocated(%d) < required(%d)",
1970 		    allocated, total_alloc_size));
1971 		goto hxge_alloc_tx_mem_fail1;
1972 	}
1973 
1974 	*num_chunks = i;
1975 	*dmap = tx_dmap;
1976 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1977 	    "==> hxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
1978 	    *dmap, i));
1979 	goto hxge_alloc_tx_mem_exit;
1980 
1981 hxge_alloc_tx_mem_fail1:
1982 	KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1983 
1984 hxge_alloc_tx_mem_exit:
1985 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1986 	    "<== hxge_alloc_tx_buf_dma status 0x%08x", status));
1987 
1988 	return (status);
1989 }
1990 
1991 /*ARGSUSED*/
1992 static void
1993 hxge_free_tx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
1994     uint32_t num_chunks)
1995 {
1996 	int i;
1997 
1998 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_tx_buf_dma"));
1999 
2000 	for (i = 0; i < num_chunks; i++) {
2001 		hxge_dma_mem_free(dmap++);
2002 	}
2003 
2004 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_tx_buf_dma"));
2005 }
2006 
2007 /*ARGSUSED*/
2008 static hxge_status_t
2009 hxge_alloc_tx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
2010     p_hxge_dma_common_t *dmap, size_t size)
2011 {
2012 	p_hxge_dma_common_t	tx_dmap;
2013 	hxge_status_t		status = HXGE_OK;
2014 
2015 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_cntl_dma"));
2016 
2017 	tx_dmap = (p_hxge_dma_common_t)KMEM_ZALLOC(sizeof (hxge_dma_common_t),
2018 	    KM_SLEEP);
2019 
2020 	tx_dmap->contig_alloc_type = B_FALSE;
2021 
2022 	status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2023 	    &hxge_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr,
2024 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, tx_dmap);
2025 	if (status != HXGE_OK) {
2026 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2027 		    " hxge_alloc_tx_cntl_dma: Alloc Failed: "
2028 		    " for size: %d", size));
2029 		goto hxge_alloc_tx_cntl_dma_fail1;
2030 	}
2031 
2032 	*dmap = tx_dmap;
2033 
2034 	goto hxge_alloc_tx_cntl_dma_exit;
2035 
2036 hxge_alloc_tx_cntl_dma_fail1:
2037 	KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t));
2038 
2039 hxge_alloc_tx_cntl_dma_exit:
2040 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2041 	    "<== hxge_alloc_tx_cntl_dma status 0x%08x", status));
2042 
2043 	return (status);
2044 }
2045 
2046 /*ARGSUSED*/
2047 static void
2048 hxge_free_tx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
2049 {
2050 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_tx_cntl_dma"));
2051 
2052 	hxge_dma_mem_free(dmap);
2053 
2054 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_tx_cntl_dma"));
2055 }
2056 
2057 static void
2058 hxge_free_tx_mem_pool(p_hxge_t hxgep)
2059 {
2060 	uint32_t		i, ndmas;
2061 	p_hxge_dma_pool_t	dma_poolp;
2062 	p_hxge_dma_common_t	*dma_buf_p;
2063 	p_hxge_dma_pool_t	dma_cntl_poolp;
2064 	p_hxge_dma_common_t	*dma_cntl_p;
2065 	uint32_t		*num_chunks;
2066 
2067 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_free_tx_mem_pool"));
2068 
2069 	dma_poolp = hxgep->tx_buf_pool_p;
2070 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
2071 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2072 		    "<== hxge_free_tx_mem_pool "
2073 		    "(null rx buf pool or buf not allocated"));
2074 		return;
2075 	}
2076 
2077 	dma_cntl_poolp = hxgep->tx_cntl_pool_p;
2078 	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
2079 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2080 		    "<== hxge_free_tx_mem_pool "
2081 		    "(null tx cntl buf pool or cntl buf not allocated"));
2082 		return;
2083 	}
2084 
2085 	dma_buf_p = dma_poolp->dma_buf_pool_p;
2086 	num_chunks = dma_poolp->num_chunks;
2087 
2088 	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
2089 	ndmas = dma_cntl_poolp->ndmas;
2090 
2091 	for (i = 0; i < ndmas; i++) {
2092 		hxge_free_tx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
2093 	}
2094 
2095 	for (i = 0; i < ndmas; i++) {
2096 		hxge_free_tx_cntl_dma(hxgep, dma_cntl_p[i]);
2097 	}
2098 
2099 	for (i = 0; i < ndmas; i++) {
2100 		KMEM_FREE(dma_buf_p[i],
2101 		    sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2102 		KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t));
2103 	}
2104 
2105 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2106 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2107 	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2108 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2109 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2110 
2111 	hxgep->tx_buf_pool_p = NULL;
2112 	hxgep->tx_cntl_pool_p = NULL;
2113 
2114 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_free_tx_mem_pool"));
2115 }
2116 
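/*
 * hxge_dma_mem_alloc() -- allocate, map and bind one DMA-able region.
 *
 * This follows the standard three-step DDI sequence:
 * ddi_dma_alloc_handle(), ddi_dma_mem_alloc(), then
 * ddi_dma_addr_bind_handle().  The binding must produce exactly one
 * cookie; on any failure (or a multi-cookie binding) everything
 * allocated so far is torn down and an error is returned so the
 * caller can retry, typically with a smaller chunk size.
 */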
2117 /*ARGSUSED*/
2118 static hxge_status_t
2119 hxge_dma_mem_alloc(p_hxge_t hxgep, dma_method_t method,
2120     struct ddi_dma_attr *dma_attrp,
2121     size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
2122     p_hxge_dma_common_t dma_p)
2123 {
2124 	caddr_t		kaddrp;
2125 	int		ddi_status = DDI_SUCCESS;
2126 
2127 	dma_p->dma_handle = NULL;
2128 	dma_p->acc_handle = NULL;
2129 	dma_p->kaddrp = NULL;
2130 
2131 	ddi_status = ddi_dma_alloc_handle(hxgep->dip, dma_attrp,
2132 	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
2133 	if (ddi_status != DDI_SUCCESS) {
2134 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2135 		    "hxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
2136 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2137 	}
2138 
2139 	ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, acc_attr_p,
2140 	    xfer_flags, DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
2141 	    &dma_p->acc_handle);
2142 	if (ddi_status != DDI_SUCCESS) {
2143 		/* The caller will decide whether it is fatal */
2144 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2145 		    "hxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
2146 		ddi_dma_free_handle(&dma_p->dma_handle);
2147 		dma_p->dma_handle = NULL;
2148 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2149 	}
2150 
2151 	if (dma_p->alength < length) {
2152 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2153 		    "hxge_dma_mem_alloc:ddi_dma_mem_alloc < length."));
2154 		ddi_dma_mem_free(&dma_p->acc_handle);
2155 		ddi_dma_free_handle(&dma_p->dma_handle);
2156 		dma_p->acc_handle = NULL;
2157 		dma_p->dma_handle = NULL;
2158 		return (HXGE_ERROR);
2159 	}
2160 
2161 	ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2162 	    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2163 	    &dma_p->dma_cookie, &dma_p->ncookies);
2164 	if (ddi_status != DDI_DMA_MAPPED) {
2165 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2166 		    "hxge_dma_mem_alloc:di_dma_addr_bind failed "
2167 		    "(staus 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2168 		if (dma_p->acc_handle) {
2169 			ddi_dma_mem_free(&dma_p->acc_handle);
2170 			dma_p->acc_handle = NULL;
2171 		}
2172 		ddi_dma_free_handle(&dma_p->dma_handle);
2173 		dma_p->dma_handle = NULL;
2174 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2175 	}
2176 
2177 	if (dma_p->ncookies != 1) {
2178 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2179 		    "hxge_dma_mem_alloc:ddi_dma_addr_bind > 1 cookie"
2180 		    "(staus 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2181 		if (dma_p->acc_handle) {
2182 			ddi_dma_mem_free(&dma_p->acc_handle);
2183 			dma_p->acc_handle = NULL;
2184 		}
2185 		(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2186 		ddi_dma_free_handle(&dma_p->dma_handle);
2187 		dma_p->dma_handle = NULL;
2188 		return (HXGE_ERROR);
2189 	}
2190 
2191 	dma_p->kaddrp = kaddrp;
2192 #if defined(__i386)
2193 	dma_p->ioaddr_pp =
2194 	    (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
2195 #else
2196 	dma_p->ioaddr_pp = (unsigned char *) dma_p->dma_cookie.dmac_laddress;
2197 #endif
2198 
2199 	HPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
2200 
2201 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_dma_mem_alloc: "
2202 	    "dma buffer allocated: dma_p $%p "
2203 	    "return dmac_ladress from cookie $%p dmac_size %d "
2204 	    "dma_p->ioaddr_p $%p "
2205 	    "dma_p->orig_ioaddr_p $%p "
2206 	    "orig_vatopa $%p "
2207 	    "alength %d (0x%x) "
2208 	    "kaddrp $%p "
2209 	    "length %d (0x%x)",
2210 	    dma_p,
2211 	    dma_p->dma_cookie.dmac_laddress,
2212 	    dma_p->dma_cookie.dmac_size,
2213 	    dma_p->ioaddr_pp,
2214 	    dma_p->orig_ioaddr_pp,
2215 	    dma_p->orig_vatopa,
2216 	    dma_p->alength, dma_p->alength,
2217 	    kaddrp,
2218 	    length, length));
2219 
2220 	return (HXGE_OK);
2221 }
2222 
2223 static void
2224 hxge_dma_mem_free(p_hxge_dma_common_t dma_p)
2225 {
2226 	if (dma_p->dma_handle != NULL) {
2227 		if (dma_p->ncookies) {
2228 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2229 			dma_p->ncookies = 0;
2230 		}
2231 		ddi_dma_free_handle(&dma_p->dma_handle);
2232 		dma_p->dma_handle = NULL;
2233 	}
2234 	if (dma_p->acc_handle != NULL) {
2235 		ddi_dma_mem_free(&dma_p->acc_handle);
2236 		dma_p->acc_handle = NULL;
2237 		HPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
2238 	}
2239 	dma_p->kaddrp = NULL;
2240 	dma_p->alength = 0;
2241 }
2242 
2243 /*
2244  *	hxge_m_start() -- start transmitting and receiving.
2245  *
2246  *	This function is called by the MAC layer when the first
2247  *	stream is opened, to prepare the hardware for sending
2248  *	and receiving packets.
2249  */
2250 static int
2251 hxge_m_start(void *arg)
2252 {
2253 	p_hxge_t hxgep = (p_hxge_t)arg;
2254 
2255 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_start"));
2256 
2257 	MUTEX_ENTER(hxgep->genlock);
2258 
2259 	if (hxge_init(hxgep) != DDI_SUCCESS) {
2260 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2261 		    "<== hxge_m_start: initialization failed"));
2262 		MUTEX_EXIT(hxgep->genlock);
2263 		return (EIO);
2264 	}
2265 
2266 	if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) {
2267 		/*
2268 		 * Start timer to check the system error and tx hangs
2269 		 */
2270 		hxgep->hxge_timerid = hxge_start_timer(hxgep,
2271 		    hxge_check_hw_state, HXGE_CHECK_TIMER);
2272 
2273 		hxgep->hxge_mac_state = HXGE_MAC_STARTED;
2274 	}
2275 
2276 	MUTEX_EXIT(hxgep->genlock);
2277 
2278 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_start"));
2279 
2280 	return (0);
2281 }
2282 
2283 /*
2284  * hxge_m_stop(): stop transmitting and receiving.
2285  */
2286 static void
2287 hxge_m_stop(void *arg)
2288 {
2289 	p_hxge_t hxgep = (p_hxge_t)arg;
2290 
2291 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_stop"));
2292 
2293 	if (hxgep->hxge_timerid) {
2294 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
2295 		hxgep->hxge_timerid = 0;
2296 	}
2297 
2298 	MUTEX_ENTER(hxgep->genlock);
2299 
2300 	hxge_uninit(hxgep);
2301 
2302 	hxgep->hxge_mac_state = HXGE_MAC_STOPPED;
2303 
2304 	MUTEX_EXIT(hxgep->genlock);
2305 
2306 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_stop"));
2307 }
2308 
2309 static int
2310 hxge_m_unicst(void *arg, const uint8_t *macaddr)
2311 {
2312 	p_hxge_t		hxgep = (p_hxge_t)arg;
2313 	struct ether_addr	addrp;
2314 	hxge_status_t		status;
2315 
2316 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_unicst"));
2317 
2318 	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
2319 
2320 	status = hxge_set_mac_addr(hxgep, &addrp);
2321 	if (status != HXGE_OK) {
2322 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2323 		    "<== hxge_m_unicst: set unitcast failed"));
2324 		return (EINVAL);
2325 	}
2326 
2327 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_unicst"));
2328 
2329 	return (0);
2330 }
2331 
2332 static int
2333 hxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
2334 {
2335 	p_hxge_t		hxgep = (p_hxge_t)arg;
2336 	struct ether_addr	addrp;
2337 
2338 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_multicst: add %d", add));
2339 
2340 	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
2341 
2342 	if (add) {
2343 		if (hxge_add_mcast_addr(hxgep, &addrp)) {
2344 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2345 			    "<== hxge_m_multicst: add multicast failed"));
2346 			return (EINVAL);
2347 		}
2348 	} else {
2349 		if (hxge_del_mcast_addr(hxgep, &addrp)) {
2350 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2351 			    "<== hxge_m_multicst: del multicast failed"));
2352 			return (EINVAL);
2353 		}
2354 	}
2355 
2356 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_multicst"));
2357 
2358 	return (0);
2359 }
2360 
2361 static int
2362 hxge_m_promisc(void *arg, boolean_t on)
2363 {
2364 	p_hxge_t hxgep = (p_hxge_t)arg;
2365 
2366 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_promisc: on %d", on));
2367 
2368 	if (hxge_set_promisc(hxgep, on)) {
2369 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2370 		    "<== hxge_m_promisc: set promisc failed"));
2371 		return (EINVAL);
2372 	}
2373 
2374 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_promisc: on %d", on));
2375 
2376 	return (0);
2377 }
2378 
2379 static void
2380 hxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2381 {
2382 	p_hxge_t	hxgep = (p_hxge_t)arg;
2383 	struct iocblk	*iocp;
2384 	boolean_t	need_privilege;
2385 	int		err;
2386 	int		cmd;
2387 
2388 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl"));
2389 
2390 	iocp = (struct iocblk *)mp->b_rptr;
2391 	iocp->ioc_error = 0;
2392 	need_privilege = B_TRUE;
2393 	cmd = iocp->ioc_cmd;
2394 
2395 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl: cmd 0x%08x", cmd));
2396 	switch (cmd) {
2397 	default:
2398 		miocnak(wq, mp, 0, EINVAL);
2399 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl: invalid"));
2400 		return;
2401 
2402 	case LB_GET_INFO_SIZE:
2403 	case LB_GET_INFO:
2404 	case LB_GET_MODE:
2405 		need_privilege = B_FALSE;
2406 		break;
2407 
2408 	case LB_SET_MODE:
2409 		break;
2410 
2411 	case ND_GET:
2412 		need_privilege = B_FALSE;
2413 		break;
2414 	case ND_SET:
2415 		break;
2416 
2417 	case HXGE_GET64:
2418 	case HXGE_PUT64:
2419 	case HXGE_GET_TX_RING_SZ:
2420 	case HXGE_GET_TX_DESC:
2421 	case HXGE_TX_SIDE_RESET:
2422 	case HXGE_RX_SIDE_RESET:
2423 	case HXGE_GLOBAL_RESET:
2424 	case HXGE_RESET_MAC:
2425 	case HXGE_PUT_TCAM:
2426 	case HXGE_GET_TCAM:
2427 	case HXGE_RTRACE:
2428 
2429 		need_privilege = B_FALSE;
2430 		break;
2431 	}
2432 
2433 	if (need_privilege) {
2434 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
2435 		if (err != 0) {
2436 			miocnak(wq, mp, 0, err);
2437 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2438 			    "<== hxge_m_ioctl: no priv"));
2439 			return;
2440 		}
2441 	}
2442 
2443 	switch (cmd) {
2444 	case ND_GET:
2445 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_GET command"));
2446 	case ND_SET:
2447 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_SET command"));
2448 		hxge_param_ioctl(hxgep, wq, mp, iocp);
2449 		break;
2450 
2451 	case LB_GET_MODE:
2452 	case LB_SET_MODE:
2453 	case LB_GET_INFO_SIZE:
2454 	case LB_GET_INFO:
2455 		hxge_loopback_ioctl(hxgep, wq, mp, iocp);
2456 		break;
2457 
2458 	case HXGE_PUT_TCAM:
2459 	case HXGE_GET_TCAM:
2460 	case HXGE_GET64:
2461 	case HXGE_PUT64:
2462 	case HXGE_GET_TX_RING_SZ:
2463 	case HXGE_GET_TX_DESC:
2464 	case HXGE_TX_SIDE_RESET:
2465 	case HXGE_RX_SIDE_RESET:
2466 	case HXGE_GLOBAL_RESET:
2467 	case HXGE_RESET_MAC:
2468 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL,
2469 		    "==> hxge_m_ioctl: cmd 0x%x", cmd));
2470 		hxge_hw_ioctl(hxgep, wq, mp, iocp);
2471 		break;
2472 	}
2473 
2474 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl"));
2475 }
2476 
2477 extern void hxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);
2478 
2479 static void
2480 hxge_m_resources(void *arg)
2481 {
2482 	p_hxge_t hxgep = arg;
2483 	mac_rx_fifo_t mrf;
2484 	p_rx_rcr_rings_t rcr_rings;
2485 	p_rx_rcr_ring_t *rcr_p;
2486 	p_rx_rcr_ring_t rcrp;
2487 	uint32_t i, ndmas;
2488 	int status;
2489 
2490 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_m_resources"));
2491 
2492 	MUTEX_ENTER(hxgep->genlock);
2493 
2494 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2495 		status = hxge_init(hxgep);
2496 		if (status != HXGE_OK) {
2497 			HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_m_resources: "
2498 			    "hxge_init failed"));
2499 			MUTEX_EXIT(hxgep->genlock);
2500 			return;
2501 		}
2502 	}
2503 
2504 	mrf.mrf_type = MAC_RX_FIFO;
2505 	mrf.mrf_blank = hxge_rx_hw_blank;
2506 
2507 	mrf.mrf_normal_blank_time = RXDMA_RCR_PTHRES_DEFAULT;
2508 	mrf.mrf_normal_pkt_count = RXDMA_RCR_TO_DEFAULT;
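	/*
	 * These defaults export the receive interrupt-blanking parameters
	 * (RCR packet-count threshold and timeout) to the MAC layer, and
	 * hxge_rx_hw_blank() is the callback the stack can use to retune
	 * them at run time.  The exact semantics of the two mrf_normal_*
	 * fields follow the mac_rx_fifo_t definition in the MAC headers.
	 */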
2509 
2510 	rcr_rings = hxgep->rx_rcr_rings;
2511 	rcr_p = rcr_rings->rcr_rings;
2512 	ndmas = rcr_rings->ndmas;
2513 
2514 	/*
2515 	 * Export our receive resources to the MAC layer.
2516 	 */
2517 	for (i = 0; i < ndmas; i++) {
2518 		rcrp = (void *)(p_rx_rcr_ring_t)rcr_p[i];
2519 		mrf.mrf_arg = rcrp;
2520 		rcrp->rcr_mac_handle =
2521 		    mac_resource_add(hxgep->mach, (mac_resource_t *)&mrf);
2522 
2523 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
2524 		    "==> hxge_m_resources: vdma %d dma %d "
2525 		    "rcrptr 0x%016llx mac_handle 0x%016llx",
2526 		    i, rcrp->rdc, rcr_p[i], rcrp->rcr_mac_handle));
2527 	}
2528 
2529 	MUTEX_EXIT(hxgep->genlock);
2530 
2531 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_m_resources"));
2532 }
2533 
2534 /*
2535  * Set an alternate MAC address
2536  */
2537 static int
2538 hxge_altmac_set(p_hxge_t hxgep, uint8_t *maddr, mac_addr_slot_t slot)
2539 {
2540 	uint64_t	address;
2541 	uint64_t	tmp;
2542 	hpi_status_t	status;
2543 	uint8_t		addrn;
2544 	int		i;
2545 
2546 	/*
2547 	 * Convert a byte array to a 48 bit value.
2548 	 * Check endianness if in doubt.
2549 	 */
2550 	address = 0;
2551 	for (i = 0; i < ETHERADDRL; i++) {
2552 		tmp = maddr[i];
2553 		address <<= 8;
2554 		address |= tmp;
2555 	}
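	/*
	 * For example, the (hypothetical) address 00:14:4f:a1:b2:c3 is
	 * packed into the 48-bit value 0x00144fa1b2c3, with maddr[0]
	 * ending up in the most significant byte.
	 */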
2556 
2557 	addrn = (uint8_t)slot;
2558 	status = hpi_pfc_set_mac_address(hxgep->hpi_handle, addrn, address);
2559 	if (status != HPI_SUCCESS)
2560 		return (EIO);
2561 
2562 	return (0);
2563 }
2564 
2565 static void
2566 hxge_mmac_kstat_update(p_hxge_t hxgep, mac_addr_slot_t slot)
2567 {
2568 	p_hxge_mmac_stats_t	mmac_stats;
2569 	int			i;
2570 	hxge_mmac_t		*mmac_info;
2571 
2572 	mmac_info = &hxgep->hxge_mmac_info;
2573 	mmac_stats = &hxgep->statsp->mmac_stats;
2574 	mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
2575 	mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;
2576 
2577 	for (i = 0; i < ETHERADDRL; i++) {
2578 		mmac_stats->mmac_avail_pool[slot].ether_addr_octet[i] =
2579 		    mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i];
2580 	}
2581 }
2582 
2583 /*
2584  * Find an unused address slot, set the address value to the one specified,
2585  * enable the port to start filtering on the new MAC address.
2586  * Returns: 0 on success.
2587  */
2588 int
2589 hxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr)
2590 {
2591 	p_hxge_t	hxgep = arg;
2592 	mac_addr_slot_t	slot;
2593 	hxge_mmac_t	*mmac_info;
2594 	int		err;
2595 	hxge_status_t	status;
2596 
2597 	mutex_enter(hxgep->genlock);
2598 
2599 	/*
2600 	 * Make sure that hxge is initialized in case _start() has
2601 	 * not been called.
2602 	 */
2603 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2604 		status = hxge_init(hxgep);
2605 		if (status != HXGE_OK) {
2606 			mutex_exit(hxgep->genlock);
2607 			return (ENXIO);
2608 		}
2609 	}
2610 
2611 	mmac_info = &hxgep->hxge_mmac_info;
2612 	if (mmac_info->naddrfree == 0) {
2613 		mutex_exit(hxgep->genlock);
2614 		return (ENOSPC);
2615 	}
2616 
2617 	if (!mac_unicst_verify(hxgep->mach, maddr->mma_addr,
2618 	    maddr->mma_addrlen)) {
2619 		mutex_exit(hxgep->genlock);
2620 		return (EINVAL);
2621 	}
2622 
2623 	/*
2624 	 * Search for the first available slot. Because naddrfree
2625 	 * is not zero, we are guaranteed to find one.
2626 	 * Slot 0 is for unique (primary) MAC.  The first alternate
2627 	 * MAC slot is slot 1.
2628 	 */
2629 	for (slot = 1; slot < mmac_info->num_mmac; slot++) {
2630 		if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
2631 			break;
2632 	}
2633 
2634 	ASSERT(slot < mmac_info->num_mmac);
2635 	if ((err = hxge_altmac_set(hxgep, maddr->mma_addr, slot)) != 0) {
2636 		mutex_exit(hxgep->genlock);
2637 		return (err);
2638 	}
2639 	bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
2640 	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
2641 	mmac_info->naddrfree--;
2642 	hxge_mmac_kstat_update(hxgep, slot);
2643 
2644 	maddr->mma_slot = slot;
2645 
2646 	mutex_exit(hxgep->genlock);
2647 	return (0);
2648 }
2649 
2650 /*
2651  * Remove the specified mac address and update
2652  * the h/w not to filter the mac address anymore.
2653  * Returns: 0, on success.
2654  */
2655 int
2656 hxge_m_mmac_remove(void *arg, mac_addr_slot_t slot)
2657 {
2658 	p_hxge_t	hxgep = arg;
2659 	hxge_mmac_t	*mmac_info;
2660 	int		err = 0;
2661 	hxge_status_t	status;
2662 
2663 	mutex_enter(hxgep->genlock);
2664 
2665 	/*
2666 	 * Make sure that hxge is initialized in case _start() has
2667 	 * not been called.
2668 	 */
2669 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2670 		status = hxge_init(hxgep);
2671 		if (status != HXGE_OK) {
2672 			mutex_exit(hxgep->genlock);
2673 			return (ENXIO);
2674 		}
2675 	}
2676 
2677 	mmac_info = &hxgep->hxge_mmac_info;
2678 	if (slot <= 0 || slot >= mmac_info->num_mmac) {
2679 		mutex_exit(hxgep->genlock);
2680 		return (EINVAL);
2681 	}
2682 
2683 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
2684 		if (hpi_pfc_mac_addr_disable(hxgep->hpi_handle, slot) ==
2685 		    HPI_SUCCESS) {
2686 			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
2687 			mmac_info->naddrfree++;
2688 			/*
2689 			 * Clear mac_pool[slot].addr so that kstat shows 0
2690 			 * alternate MAC address if the slot is not used.
2691 			 */
2692 			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
2693 			hxge_mmac_kstat_update(hxgep, slot);
2694 		} else {
2695 			err = EIO;
2696 		}
2697 	} else {
2698 		err = EINVAL;
2699 	}
2700 
2701 	mutex_exit(hxgep->genlock);
2702 	return (err);
2703 }
2704 
2705 /*
2706  * Modify a mac address added by hxge_mmac_add().
2707  * Returns: 0, on success.
2708  */
2709 int
2710 hxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr)
2711 {
2712 	p_hxge_t	hxgep = arg;
2713 	mac_addr_slot_t	slot;
2714 	hxge_mmac_t	*mmac_info;
2715 	int		err = 0;
2716 	hxge_status_t	status;
2717 
2718 	if (!mac_unicst_verify(hxgep->mach, maddr->mma_addr,
2719 	    maddr->mma_addrlen))
2720 		return (EINVAL);
2721 
2722 	slot = maddr->mma_slot;
2723 
2724 	mutex_enter(hxgep->genlock);
2725 
2726 	/*
2727 	 * Make sure that hxge is initialized in case _start() has
2728 	 * not been called.
2729 	 */
2730 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2731 		status = hxge_init(hxgep);
2732 		if (status != HXGE_OK) {
2733 			mutex_exit(hxgep->genlock);
2734 			return (ENXIO);
2735 		}
2736 	}
2737 
2738 	mmac_info = &hxgep->hxge_mmac_info;
2739 	if (slot <= 0 || slot >= mmac_info->num_mmac) {
2740 		mutex_exit(hxgep->genlock);
2741 		return (EINVAL);
2742 	}
2743 
2744 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
2745 		if ((err = hxge_altmac_set(hxgep, maddr->mma_addr,
2746 		    slot)) == 0) {
2747 			bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr,
2748 			    ETHERADDRL);
2749 			hxge_mmac_kstat_update(hxgep, slot);
2750 		}
2751 	} else {
2752 		err = EINVAL;
2753 	}
2754 
2755 	mutex_exit(hxgep->genlock);
2756 	return (err);
2757 }
2758 
2759 /*
2760  * int
2761  * hxge_m_mmac_get() - Get the MAC address and other information
2762  *	related to the slot.  mma_flags should be set to 0 in the call.
2763  *	Note: although kstat shows MAC address as zero when a slot is
2764  *	not used, Crossbow expects hxge_m_mmac_get to copy factory MAC
2765  *	to the caller as long as the slot is not using a user MAC address.
2766  *	The following table shows the rules,
2767  *
2768  *     					USED    VENDOR    mma_addr
2769  *	------------------------------------------------------------
2770  *	(1) Slot uses a user MAC:	yes      no     user MAC
2771  *	(2) Slot uses a factory MAC:    yes      yes    factory MAC
2772  *	(3) Slot is not used but is
2773  *	     factory MAC capable:	no       yes    factory MAC
2774  *	(4) Slot is not used and is
2775  *	     not factory MAC capable:   no       no	0
2776  *	------------------------------------------------------------
2777  */
2778 int
2779 hxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr)
2780 {
2781 	hxge_t		*hxgep = arg;
2782 	mac_addr_slot_t	slot;
2783 	hxge_mmac_t	*mmac_info;
2784 	hxge_status_t	status;
2785 
2786 	slot = maddr->mma_slot;
2787 
2788 	mutex_enter(hxgep->genlock);
2789 
2790 	/*
2791 	 * Make sure that hxge is initialized in case _start() has
2792 	 * not been called.
2793 	 */
2794 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2795 		status = hxge_init(hxgep);
2796 		if (status != HXGE_OK) {
2797 			mutex_exit(hxgep->genlock);
2798 			return (ENXIO);
2799 		}
2800 	}
2801 
2802 	mmac_info = &hxgep->hxge_mmac_info;
2803 	if (slot <= 0 || slot >= mmac_info->num_mmac) {
2804 		mutex_exit(hxgep->genlock);
2805 		return (EINVAL);
2806 	}
2807 
2808 	maddr->mma_flags = 0;
2809 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
2810 		maddr->mma_flags |= MMAC_SLOT_USED;
2811 		bcopy(mmac_info->mac_pool[slot].addr,
2812 		    maddr->mma_addr, ETHERADDRL);
2813 		maddr->mma_addrlen = ETHERADDRL;
2814 	}
2815 
2816 	mutex_exit(hxgep->genlock);
2817 	return (0);
2818 }
2819 
2820 /*ARGSUSED*/
2821 boolean_t
2822 hxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2823 {
2824 	p_hxge_t		hxgep = (p_hxge_t)arg;
2825 	uint32_t		*txflags = cap_data;
2826 	multiaddress_capab_t	*mmacp = cap_data;
2827 
2828 	switch (cap) {
2829 	case MAC_CAPAB_HCKSUM:
2830 		*txflags = HCKSUM_INET_PARTIAL;
2831 		break;
2832 
2833 	case MAC_CAPAB_POLL:
2834 		/*
2835 		 * There's nothing for us to fill in, simply returning B_TRUE
2836 		 * stating that we support polling is sufficient.
2837 		 */
2838 		break;
2839 
2840 	case MAC_CAPAB_MULTIADDRESS:
2841 		/*
2842 		 * The number of MAC addresses made available by
2843 		 * this capability is one less than the total as
2844 		 * the primary address in slot 0 is counted in
2845 		 * the total.
2846 		 */
2847 		mmacp->maddr_naddr = PFC_N_MAC_ADDRESSES - 1;
2848 		mmacp->maddr_naddrfree = hxgep->hxge_mmac_info.naddrfree;
2849 		mmacp->maddr_flag = 0;	/* No multiple factory macs */
2850 		mmacp->maddr_handle = hxgep;
2851 		mmacp->maddr_add = hxge_m_mmac_add;
2852 		mmacp->maddr_remove = hxge_m_mmac_remove;
2853 		mmacp->maddr_modify = hxge_m_mmac_modify;
2854 		mmacp->maddr_get = hxge_m_mmac_get;
2855 		mmacp->maddr_reserve = NULL;	/* No multiple factory macs */
2856 		break;
2857 	default:
2858 		return (B_FALSE);
2859 	}
2860 	return (B_TRUE);
2861 }
2862 
2863 /*
2864  * Module loading and removing entry points.
2865  */
2866 DDI_DEFINE_STREAM_OPS(hxge_dev_ops, nulldev, nulldev, hxge_attach, hxge_detach,
2867     nodev, NULL, D_MP, NULL);
2868 
2869 extern struct mod_ops mod_driverops;
2870 
2871 #define	HXGE_DESC_VER	"HXGE 10Gb Ethernet Driver"
2872 
2873 /*
2874  * Module linkage information for the kernel.
2875  */
2876 static struct modldrv hxge_modldrv = {
2877 	&mod_driverops,
2878 	HXGE_DESC_VER,
2879 	&hxge_dev_ops
2880 };
2881 
2882 static struct modlinkage modlinkage = {
2883 	MODREV_1, (void *) &hxge_modldrv, NULL
2884 };
2885 
2886 int
2887 _init(void)
2888 {
2889 	int status;
2890 
2891 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
2892 	mac_init_ops(&hxge_dev_ops, "hxge");
2893 	status = ddi_soft_state_init(&hxge_list, sizeof (hxge_t), 0);
2894 	if (status != 0) {
2895 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
2896 		    "failed to init device soft state"));
2897 		mac_fini_ops(&hxge_dev_ops);
2898 		goto _init_exit;
2899 	}
2900 
2901 	status = mod_install(&modlinkage);
2902 	if (status != 0) {
2903 		ddi_soft_state_fini(&hxge_list);
2904 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "Mod install failed"));
2905 		goto _init_exit;
2906 	}
2907 
2908 	MUTEX_INIT(&hxge_common_lock, NULL, MUTEX_DRIVER, NULL);
2909 
2910 _init_exit:
2911 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
2912 
2913 	return (status);
2914 }
2915 
2916 int
2917 _fini(void)
2918 {
2919 	int status;
2920 
2921 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
2922 
2923 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
2924 
2925 	if (hxge_mblks_pending)
2926 		return (EBUSY);
2927 
2928 	status = mod_remove(&modlinkage);
2929 	if (status != DDI_SUCCESS) {
2930 		HXGE_DEBUG_MSG((NULL, MOD_CTL,
2931 		    "Module removal failed 0x%08x", status));
2932 		goto _fini_exit;
2933 	}
2934 
2935 	mac_fini_ops(&hxge_dev_ops);
2936 
2937 	ddi_soft_state_fini(&hxge_list);
2938 
2939 	MUTEX_DESTROY(&hxge_common_lock);
2940 
2941 _fini_exit:
2942 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));
2943 
2944 	return (status);
2945 }
2946 
2947 int
2948 _info(struct modinfo *modinfop)
2949 {
2950 	int status;
2951 
2952 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
2953 	status = mod_info(&modlinkage, modinfop);
2954 	HXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
2955 
2956 	return (status);
2957 }
2958 
2959 /*ARGSUSED*/
2960 hxge_status_t
2961 hxge_add_intrs(p_hxge_t hxgep)
2962 {
2963 	int		intr_types;
2964 	int		type = 0;
2965 	int		ddi_status = DDI_SUCCESS;
2966 	hxge_status_t	status = HXGE_OK;
2967 
2968 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs"));
2969 
2970 	hxgep->hxge_intr_type.intr_registered = B_FALSE;
2971 	hxgep->hxge_intr_type.intr_enabled = B_FALSE;
2972 	hxgep->hxge_intr_type.msi_intx_cnt = 0;
2973 	hxgep->hxge_intr_type.intr_added = 0;
2974 	hxgep->hxge_intr_type.niu_msi_enable = B_FALSE;
2975 	hxgep->hxge_intr_type.intr_type = 0;
2976 
2977 	if (hxge_msi_enable) {
2978 		hxgep->hxge_intr_type.niu_msi_enable = B_TRUE;
2979 	}
2980 
2981 	/* Get the supported interrupt types */
2982 	if ((ddi_status = ddi_intr_get_supported_types(hxgep->dip, &intr_types))
2983 	    != DDI_SUCCESS) {
2984 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_intrs: "
2985 		    "ddi_intr_get_supported_types failed: status 0x%08x",
2986 		    ddi_status));
2987 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2988 	}
2989 
2990 	hxgep->hxge_intr_type.intr_types = intr_types;
2991 
2992 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
2993 	    "ddi_intr_get_supported_types: 0x%08x", intr_types));
2994 
2995 	/*
2996 	 * Pick the interrupt type to use based on hxge_msi_enable:
2997 	 *	1 - MSI
2998 	 *	2 - MSI-X
2999 	 *	others - FIXED (INTx emulation)
3000 	 */
3001 	switch (hxge_msi_enable) {
3002 	default:
3003 		type = DDI_INTR_TYPE_FIXED;
3004 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3005 		    "use fixed (intx emulation) type %08x", type));
3006 		break;
3007 
3008 	case 2:
3009 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3010 		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
3011 		if (intr_types & DDI_INTR_TYPE_MSIX) {
3012 			type = DDI_INTR_TYPE_MSIX;
3013 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3014 			    "==> hxge_add_intrs: "
3015 			    "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3016 		} else if (intr_types & DDI_INTR_TYPE_MSI) {
3017 			type = DDI_INTR_TYPE_MSI;
3018 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3019 			    "==> hxge_add_intrs: "
3020 			    "ddi_intr_get_supported_types: MSI 0x%08x", type));
3021 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
3022 			type = DDI_INTR_TYPE_FIXED;
3023 			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3024 			    "ddi_intr_get_supported_types: MSXED0x%08x", type));
3025 		}
3026 		break;
3027 
3028 	case 1:
3029 		if (intr_types & DDI_INTR_TYPE_MSI) {
3030 			type = DDI_INTR_TYPE_MSI;
3031 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3032 			    "==> hxge_add_intrs: "
3033 			    "ddi_intr_get_supported_types: MSI 0x%08x", type));
3034 		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
3035 			type = DDI_INTR_TYPE_MSIX;
3036 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3037 			    "==> hxge_add_intrs: "
3038 			    "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3039 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
3040 			type = DDI_INTR_TYPE_FIXED;
3041 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3042 			    "==> hxge_add_intrs: "
3043 			    "ddi_intr_get_supported_types: MSXED0x%08x", type));
3044 		}
3045 	}
3046 
3047 	hxgep->hxge_intr_type.intr_type = type;
3048 	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
3049 	    type == DDI_INTR_TYPE_FIXED) &&
3050 	    hxgep->hxge_intr_type.niu_msi_enable) {
3051 		if ((status = hxge_add_intrs_adv(hxgep)) != DDI_SUCCESS) {
3052 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3053 			    " hxge_add_intrs: "
3054 			    " hxge_add_intrs_adv failed: status 0x%08x",
3055 			    status));
3056 			return (status);
3057 		} else {
3058 			HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_intrs: "
3059 			    "interrupts registered : type %d", type));
3060 			hxgep->hxge_intr_type.intr_registered = B_TRUE;
3061 
3062 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
3063 			    "\nAdded advanced hxge add_intr_adv "
3064 			    "intr type 0x%x\n", type));
3065 
3066 			return (status);
3067 		}
3068 	}
3069 
3070 	if (!hxgep->hxge_intr_type.intr_registered) {
3071 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3072 		    "==> hxge_add_intrs: failed to register interrupts"));
3073 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3074 	}
3075 
3076 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs"));
3077 
3078 	return (status);
3079 }
3080 
3081 /*ARGSUSED*/
3082 static hxge_status_t
3083 hxge_add_soft_intrs(p_hxge_t hxgep)
3084 {
3085 	int		ddi_status = DDI_SUCCESS;
3086 	hxge_status_t	status = HXGE_OK;
3087 
3088 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_soft_intrs"));
3089 
3090 	hxgep->resched_id = NULL;
3091 	hxgep->resched_running = B_FALSE;
3092 	ddi_status = ddi_add_softintr(hxgep->dip, DDI_SOFTINT_LOW,
3093 	    &hxgep->resched_id, NULL, NULL, hxge_reschedule, (caddr_t)hxgep);
3094 	if (ddi_status != DDI_SUCCESS) {
3095 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_soft_intrs: "
3096 		    "ddi_add_softintrs failed: status 0x%08x", ddi_status));
3097 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3098 	}
3099 
3100 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_ddi_add_soft_intrs"));
3101 
3102 	return (status);
3103 }
3104 
3105 /*ARGSUSED*/
3106 static hxge_status_t
3107 hxge_add_intrs_adv(p_hxge_t hxgep)
3108 {
3109 	int		intr_type;
3110 	p_hxge_intr_t	intrp;
3111 	hxge_status_t	status;
3112 
3113 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv"));
3114 
3115 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3116 	intr_type = intrp->intr_type;
3117 
3118 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv: type 0x%x",
3119 	    intr_type));
3120 
3121 	switch (intr_type) {
3122 	case DDI_INTR_TYPE_MSI:		/* 0x2 */
3123 	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
3124 		status = hxge_add_intrs_adv_type(hxgep, intr_type);
3125 		break;
3126 
3127 	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
3128 		status = hxge_add_intrs_adv_type_fix(hxgep, intr_type);
3129 		break;
3130 
3131 	default:
3132 		status = HXGE_ERROR;
3133 		break;
3134 	}
3135 
3136 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv"));
3137 
3138 	return (status);
3139 }
3140 
3141 /*ARGSUSED*/
3142 static hxge_status_t
3143 hxge_add_intrs_adv_type(p_hxge_t hxgep, uint32_t int_type)
3144 {
3145 	dev_info_t	*dip = hxgep->dip;
3146 	p_hxge_ldg_t	ldgp;
3147 	p_hxge_intr_t	intrp;
3148 	uint_t		*inthandler;
3149 	void		*arg1, *arg2;
3150 	int		behavior;
3151 	int		nintrs, navail;
3152 	int		nactual, nrequired;
3153 	int		inum = 0;
3154 	int		loop = 0;
3155 	int		x, y;
3156 	int		ddi_status = DDI_SUCCESS;
3157 	hxge_status_t	status = HXGE_OK;
3158 
3159 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type"));
3160 
3161 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3162 
3163 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3164 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3165 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3166 		    "ddi_intr_get_nintrs() failed, status: 0x%x%, "
3167 		    "nintrs: %d", ddi_status, nintrs));
3168 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3169 	}
3170 
3171 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
3172 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
3173 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3174 		    "ddi_intr_get_navail() failed, status: 0x%x%, "
3175 		    "nintrs: %d", ddi_status, navail));
3176 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3177 	}
3178 
3179 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3180 	    "ddi_intr_get_navail() returned: intr type %d nintrs %d, navail %d",
3181 	    int_type, nintrs, navail));
3182 
3183 	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
3184 		/* MSI must be power of 2 */
3185 		if ((navail & 16) == 16) {
3186 			navail = 16;
3187 		} else if ((navail & 8) == 8) {
3188 			navail = 8;
3189 		} else if ((navail & 4) == 4) {
3190 			navail = 4;
3191 		} else if ((navail & 2) == 2) {
3192 			navail = 2;
3193 		} else {
3194 			navail = 1;
3195 		}
3196 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3197 		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
3198 		    "navail %d", nintrs, navail));
3199 	}
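	/*
	 * Since MSI allows at most 32 vectors and a power-of-two count
	 * skips the block above, navail here is at most 31, so the masks
	 * reliably pick the largest power of two (capped at 16) that does
	 * not exceed navail, e.g. navail 5 -> 4 and navail 20 -> 16.
	 */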
3200 
3201 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3202 	    "requesting: intr type %d nintrs %d, navail %d",
3203 	    int_type, nintrs, navail));
3204 
3205 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
3206 	    DDI_INTR_ALLOC_NORMAL);
3207 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
3208 	intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP);
3209 
3210 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
3211 	    navail, &nactual, behavior);
3212 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
3213 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3214 		    " ddi_intr_alloc() failed: %d", ddi_status));
3215 		kmem_free(intrp->htable, intrp->intr_size);
3216 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3217 	}
3218 
3219 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3220 	    "ddi_intr_alloc() returned: navail %d nactual %d",
3221 	    navail, nactual));
3222 
3223 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
3224 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
3225 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3226 		    " ddi_intr_get_pri() failed: %d", ddi_status));
3227 		/* Free already allocated interrupts */
3228 		for (y = 0; y < nactual; y++) {
3229 			(void) ddi_intr_free(intrp->htable[y]);
3230 		}
3231 
3232 		kmem_free(intrp->htable, intrp->intr_size);
3233 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3234 	}
3235 
3236 	nrequired = 0;
3237 	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
3238 	if (status != HXGE_OK) {
3239 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3240 		    "hxge_add_intrs_adv_typ:hxge_ldgv_init "
3241 		    "failed: 0x%x", status));
3242 		/* Free already allocated interrupts */
3243 		for (y = 0; y < nactual; y++) {
3244 			(void) ddi_intr_free(intrp->htable[y]);
3245 		}
3246 
3247 		kmem_free(intrp->htable, intrp->intr_size);
3248 		return (status);
3249 	}
3250 
3251 	ldgp = hxgep->ldgvp->ldgp;
3252 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3253 	    "After hxge_ldgv_init(): nreq %d nactual %d", nrequired, nactual));
3254 
3255 	if (nactual < nrequired)
3256 		loop = nactual;
3257 	else
3258 		loop = nrequired;
3259 
3260 	for (x = 0; x < loop; x++, ldgp++) {
3261 		ldgp->vector = (uint8_t)x;
3262 		arg1 = ldgp->ldvp;
3263 		arg2 = hxgep;
3264 		if (ldgp->nldvs == 1) {
3265 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
3266 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3267 			    "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
3268 			    "1-1 int handler (entry %d)\n",
3269 			    arg1, arg2, x));
3270 		} else if (ldgp->nldvs > 1) {
3271 			inthandler = (uint_t *)ldgp->sys_intr_handler;
3272 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3273 			    "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
3274 			    "nldevs %d int handler (entry %d)\n",
3275 			    arg1, arg2, ldgp->nldvs, x));
3276 		}
3277 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3278 		    "==> hxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
3279 		    "htable 0x%llx", x, intrp->htable[x]));
3280 
3281 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
3282 		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
3283 		    DDI_SUCCESS) {
3284 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3285 			    "==> hxge_add_intrs_adv_type: failed #%d "
3286 			    "status 0x%x", x, ddi_status));
3287 			for (y = 0; y < intrp->intr_added; y++) {
3288 				(void) ddi_intr_remove_handler(
3289 				    intrp->htable[y]);
3290 			}
3291 
3292 			/* Free already allocated intr */
3293 			for (y = 0; y < nactual; y++) {
3294 				(void) ddi_intr_free(intrp->htable[y]);
3295 			}
3296 			kmem_free(intrp->htable, intrp->intr_size);
3297 
3298 			(void) hxge_ldgv_uninit(hxgep);
3299 
3300 			return (HXGE_ERROR | HXGE_DDI_FAILED);
3301 		}
3302 
3303 		intrp->intr_added++;
3304 	}
3305 	intrp->msi_intx_cnt = nactual;
3306 
3307 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3308 	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
3309 	    navail, nactual, intrp->msi_intx_cnt, intrp->intr_added));
3310 
3311 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
3312 	(void) hxge_intr_ldgv_init(hxgep);
3313 
3314 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type"));
3315 
3316 	return (status);
3317 }
3318 
3319 /*ARGSUSED*/
3320 static hxge_status_t
3321 hxge_add_intrs_adv_type_fix(p_hxge_t hxgep, uint32_t int_type)
3322 {
3323 	dev_info_t	*dip = hxgep->dip;
3324 	p_hxge_ldg_t	ldgp;
3325 	p_hxge_intr_t	intrp;
3326 	uint_t		*inthandler;
3327 	void		*arg1, *arg2;
3328 	int		behavior;
3329 	int		nintrs, navail;
3330 	int		nactual, nrequired;
3331 	int		inum = 0;
3332 	int		x, y;
3333 	int		ddi_status = DDI_SUCCESS;
3334 	hxge_status_t	status = HXGE_OK;
3335 
3336 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type_fix"));
3337 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3338 
3339 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3340 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3341 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3342 		    "ddi_intr_get_nintrs() failed, status: 0x%x%, "
3343 		    "nintrs: %d", status, nintrs));
3344 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3345 	}
3346 
3347 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
3348 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
3349 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3350 		    "ddi_intr_get_navail() failed, status: 0x%x%, "
3351 		    "nintrs: %d", ddi_status, navail));
3352 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3353 	}
3354 
3355 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3356 	    "ddi_intr_get_navail() returned: nintrs %d, naavail %d",
3357 	    nintrs, navail));
3358 
3359 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
3360 	    DDI_INTR_ALLOC_NORMAL);
3361 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
3362 	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
3363 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
3364 	    navail, &nactual, behavior);
3365 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
3366 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3367 		    " ddi_intr_alloc() failed: %d", ddi_status));
3368 		kmem_free(intrp->htable, intrp->intr_size);
3369 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3370 	}
3371 
3372 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
3373 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
3374 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3375 		    " ddi_intr_get_pri() failed: %d", ddi_status));
3376 		/* Free already allocated interrupts */
3377 		for (y = 0; y < nactual; y++) {
3378 			(void) ddi_intr_free(intrp->htable[y]);
3379 		}
3380 
3381 		kmem_free(intrp->htable, intrp->intr_size);
3382 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3383 	}
3384 
3385 	nrequired = 0;
3386 	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
3387 	if (status != HXGE_OK) {
3388 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3389 		    "hxge_add_intrs_adv_type_fix:hxge_ldgv_init "
3390 		    "failed: 0x%x", status));
3391 		/* Free already allocated interrupts */
3392 		for (y = 0; y < nactual; y++) {
3393 			(void) ddi_intr_free(intrp->htable[y]);
3394 		}
3395 
3396 		kmem_free(intrp->htable, intrp->intr_size);
3397 		return (status);
3398 	}
3399 
3400 	ldgp = hxgep->ldgvp->ldgp;
3401 	for (x = 0; x < nrequired; x++, ldgp++) {
3402 		ldgp->vector = (uint8_t)x;
3403 		arg1 = ldgp->ldvp;
3404 		arg2 = hxgep;
3405 		if (ldgp->nldvs == 1) {
3406 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
3407 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3408 			    "hxge_add_intrs_adv_type_fix: "
3409 			    "1-1 int handler(%d) ldg %d ldv %d "
3410 			    "arg1 $%p arg2 $%p\n",
3411 			    x, ldgp->ldg, ldgp->ldvp->ldv, arg1, arg2));
3412 		} else if (ldgp->nldvs > 1) {
3413 			inthandler = (uint_t *)ldgp->sys_intr_handler;
3414 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3415 			    "hxge_add_intrs_adv_type_fix: "
3416 			    "shared ldv %d int handler(%d) ldv %d ldg %d"
3417 			    "arg1 0x%016llx arg2 0x%016llx\n",
3418 			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
3419 			    arg1, arg2));
3420 		}
3421 
3422 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
3423 		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
3424 		    DDI_SUCCESS) {
3425 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3426 			    "==> hxge_add_intrs_adv_type_fix: failed #%d "
3427 			    "status 0x%x", x, ddi_status));
3428 			for (y = 0; y < intrp->intr_added; y++) {
3429 				(void) ddi_intr_remove_handler(
3430 				    intrp->htable[y]);
3431 			}
3432 			for (y = 0; y < nactual; y++) {
3433 				(void) ddi_intr_free(intrp->htable[y]);
3434 			}
3435 			/* Free already allocated intr */
3436 			kmem_free(intrp->htable, intrp->intr_size);
3437 
3438 			(void) hxge_ldgv_uninit(hxgep);
3439 
3440 			return (HXGE_ERROR | HXGE_DDI_FAILED);
3441 		}
3442 		intrp->intr_added++;
3443 	}
3444 
3445 	intrp->msi_intx_cnt = nactual;
3446 
3447 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
3448 
3449 	status = hxge_intr_ldgv_init(hxgep);
3450 
3451 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type_fix"));
3452 
3453 	return (status);
3454 }
3455 
3456 /*ARGSUSED*/
3457 static void
3458 hxge_remove_intrs(p_hxge_t hxgep)
3459 {
3460 	int		i, inum;
3461 	p_hxge_intr_t	intrp;
3462 
3463 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs"));
3464 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3465 	if (!intrp->intr_registered) {
3466 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3467 		    "<== hxge_remove_intrs: interrupts not registered"));
3468 		return;
3469 	}
3470 
3471 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs:advanced"));
3472 
3473 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
3474 		(void) ddi_intr_block_disable(intrp->htable,
3475 		    intrp->intr_added);
3476 	} else {
3477 		for (i = 0; i < intrp->intr_added; i++) {
3478 			(void) ddi_intr_disable(intrp->htable[i]);
3479 		}
3480 	}
3481 
3482 	for (inum = 0; inum < intrp->intr_added; inum++) {
3483 		if (intrp->htable[inum]) {
3484 			(void) ddi_intr_remove_handler(intrp->htable[inum]);
3485 		}
3486 	}
3487 
3488 	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
3489 		if (intrp->htable[inum]) {
3490 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
3491 			    "hxge_remove_intrs: ddi_intr_free inum %d "
3492 			    "msi_intx_cnt %d intr_added %d",
3493 			    inum, intrp->msi_intx_cnt, intrp->intr_added));
3494 
3495 			(void) ddi_intr_free(intrp->htable[inum]);
3496 		}
3497 	}
3498 
3499 	kmem_free(intrp->htable, intrp->intr_size);
3500 	intrp->intr_registered = B_FALSE;
3501 	intrp->intr_enabled = B_FALSE;
3502 	intrp->msi_intx_cnt = 0;
3503 	intrp->intr_added = 0;
3504 
3505 	(void) hxge_ldgv_uninit(hxgep);
3506 
3507 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_intrs"));
3508 }
3509 
3510 /*ARGSUSED*/
3511 static void
3512 hxge_remove_soft_intrs(p_hxge_t hxgep)
3513 {
3514 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_soft_intrs"));
3515 
3516 	if (hxgep->resched_id) {
3517 		ddi_remove_softintr(hxgep->resched_id);
3518 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3519 		    "==> hxge_remove_soft_intrs: removed"));
3520 		hxgep->resched_id = NULL;
3521 	}
3522 
3523 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_soft_intrs"));
3524 }
3525 
3526 /*ARGSUSED*/
3527 void
3528 hxge_intrs_enable(p_hxge_t hxgep)
3529 {
3530 	p_hxge_intr_t	intrp;
3531 	int		i;
3532 	int		status;
3533 
3534 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable"));
3535 
3536 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3537 
3538 	if (!intrp->intr_registered) {
3539 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_intrs_enable: "
3540 		    "interrupts are not registered"));
3541 		return;
3542 	}
3543 
3544 	if (intrp->intr_enabled) {
3545 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3546 		    "<== hxge_intrs_enable: already enabled"));
3547 		return;
3548 	}
3549 
3550 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
3551 		status = ddi_intr_block_enable(intrp->htable,
3552 		    intrp->intr_added);
3553 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
3554 		    "block enable - status 0x%x total inums #%d\n",
3555 		    status, intrp->intr_added));
3556 	} else {
3557 		for (i = 0; i < intrp->intr_added; i++) {
3558 			status = ddi_intr_enable(intrp->htable[i]);
3559 			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
3560 			    "ddi_intr_enable:enable - status 0x%x "
3561 			    "total inums %d enable inum #%d\n",
3562 			    status, intrp->intr_added, i));
3563 			if (status == DDI_SUCCESS) {
3564 				intrp->intr_enabled = B_TRUE;
3565 			}
3566 		}
3567 	}
3568 
3569 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_enable"));
3570 }
3571 
3572 /*ARGSUSED*/
3573 static void
3574 hxge_intrs_disable(p_hxge_t hxgep)
3575 {
3576 	p_hxge_intr_t	intrp;
3577 	int		i;
3578 
3579 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_disable"));
3580 
3581 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3582 
3583 	if (!intrp->intr_registered) {
3584 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable: "
3585 		    "interrupts are not registered"));
3586 		return;
3587 	}
3588 
3589 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
3590 		(void) ddi_intr_block_disable(intrp->htable,
3591 		    intrp->intr_added);
3592 	} else {
3593 		for (i = 0; i < intrp->intr_added; i++) {
3594 			(void) ddi_intr_disable(intrp->htable[i]);
3595 		}
3596 	}
3597 
3598 	intrp->intr_enabled = B_FALSE;
3599 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable"));
3600 }
3601 
3602 static hxge_status_t
3603 hxge_mac_register(p_hxge_t hxgep)
3604 {
3605 	mac_register_t	*macp;
3606 	int		status;
3607 
3608 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_mac_register"));
3609 
3610 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
3611 		return (HXGE_ERROR);
3612 
3613 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
3614 	macp->m_driver = hxgep;
3615 	macp->m_dip = hxgep->dip;
3616 	macp->m_src_addr = hxgep->ouraddr.ether_addr_octet;
3617 
3618 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
3619 	    "hxge_mac_register: ether addr is %x:%x:%x:%x:%x:%x",
3620 	    macp->m_src_addr[0],
3621 	    macp->m_src_addr[1],
3622 	    macp->m_src_addr[2],
3623 	    macp->m_src_addr[3],
3624 	    macp->m_src_addr[4],
3625 	    macp->m_src_addr[5]));
3626 
3627 	macp->m_callbacks = &hxge_m_callbacks;
3628 	macp->m_min_sdu = 0;
3629 	macp->m_max_sdu = hxgep->vmac.maxframesize -
3630 	    sizeof (struct ether_header) - ETHERFCSL - 4 - TX_PKT_HEADER_SIZE;
3631 
3632 	status = mac_register(macp, &hxgep->mach);
3633 	mac_free(macp);
3634 
3635 	if (status != 0) {
3636 		cmn_err(CE_WARN,
3637 		    "hxge_mac_register failed (status %d instance %d)",
3638 		    status, hxgep->instance);
3639 		return (HXGE_ERROR);
3640 	}
3641 
3642 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_mac_register success "
3643 	    "(instance %d)", hxgep->instance));
3644 
3645 	return (HXGE_OK);
3646 }
3647 
3648 static int
3649 hxge_init_common_dev(p_hxge_t hxgep)
3650 {
3651 	p_hxge_hw_list_t	hw_p;
3652 	dev_info_t		*p_dip;
3653 
3654 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_init_common_dev"));
3655 
3656 	p_dip = hxgep->p_dip;
3657 	MUTEX_ENTER(&hxge_common_lock);
3658 
3659 	/*
3660 	 * Loop through existing per Hydra hardware list.
3661 	 */
3662 	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
3663 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
3664 		    "==> hxge_init_common_dev: hw_p $%p parent dip $%p",
3665 		    hw_p, p_dip));
3666 		if (hw_p->parent_devp == p_dip) {
3667 			hxgep->hxge_hw_p = hw_p;
3668 			hw_p->ndevs++;
3669 			hw_p->hxge_p = hxgep;
3670 			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
3671 			    "==> hxge_init_common_device: "
3672 			    "hw_p $%p parent dip $%p ndevs %d (found)",
3673 			    hw_p, p_dip, hw_p->ndevs));
3674 			break;
3675 		}
3676 	}
3677 
3678 	if (hw_p == NULL) {
3679 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
3680 		    "==> hxge_init_common_dev: parent dip $%p (new)", p_dip));
3681 		hw_p = kmem_zalloc(sizeof (hxge_hw_list_t), KM_SLEEP);
3682 		hw_p->parent_devp = p_dip;
3683 		hw_p->magic = HXGE_MAGIC;
3684 		hxgep->hxge_hw_p = hw_p;
3685 		hw_p->ndevs++;
3686 		hw_p->hxge_p = hxgep;
3687 		hw_p->next = hxge_hw_list;
3688 
3689 		MUTEX_INIT(&hw_p->hxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
3690 		MUTEX_INIT(&hw_p->hxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
3691 		MUTEX_INIT(&hw_p->hxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
3692 
3693 		hxge_hw_list = hw_p;
3694 	}
3695 	MUTEX_EXIT(&hxge_common_lock);
3696 	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
3697 	    "==> hxge_init_common_dev (hxge_hw_list) $%p", hxge_hw_list));
3698 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_init_common_dev"));
3699 
3700 	return (HXGE_OK);
3701 }
3702 
3703 static void
3704 hxge_uninit_common_dev(p_hxge_t hxgep)
3705 {
3706 	p_hxge_hw_list_t	hw_p, h_hw_p;
3707 	dev_info_t		*p_dip;
3708 
3709 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_uninit_common_dev"));
3710 	if (hxgep->hxge_hw_p == NULL) {
3711 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
3712 		    "<== hxge_uninit_common_dev (no common)"));
3713 		return;
3714 	}
3715 
3716 	MUTEX_ENTER(&hxge_common_lock);
3717 	h_hw_p = hxge_hw_list;
3718 	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
3719 		p_dip = hw_p->parent_devp;
3720 		if (hxgep->hxge_hw_p == hw_p && p_dip == hxgep->p_dip &&
3721 		    hxgep->hxge_hw_p->magic == HXGE_MAGIC &&
3722 		    hw_p->magic == HXGE_MAGIC) {
3723 			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
3724 			    "==> hxge_uninit_common_dev: "
3725 			    "hw_p $%p parent dip $%p ndevs %d (found)",
3726 			    hw_p, p_dip, hw_p->ndevs));
3727 
3728 			hxgep->hxge_hw_p = NULL;
3729 			if (hw_p->ndevs) {
3730 				hw_p->ndevs--;
3731 			}
3732 			hw_p->hxge_p = NULL;
3733 			if (!hw_p->ndevs) {
3734 				MUTEX_DESTROY(&hw_p->hxge_vlan_lock);
3735 				MUTEX_DESTROY(&hw_p->hxge_tcam_lock);
3736 				MUTEX_DESTROY(&hw_p->hxge_cfg_lock);
3737 				HXGE_DEBUG_MSG((hxgep, MOD_CTL,
3738 				    "==> hxge_uninit_common_dev: "
3739 				    "hw_p $%p parent dip $%p ndevs %d (last)",
3740 				    hw_p, p_dip, hw_p->ndevs));
3741 
3742 				if (hw_p == hxge_hw_list) {
3743 					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
3744 					    "==> hxge_uninit_common_dev:"
3745 					    "remove head "
3746 					    "hw_p $%p parent dip $%p "
3747 					    "ndevs %d (head)",
3748 					    hw_p, p_dip, hw_p->ndevs));
3749 					hxge_hw_list = hw_p->next;
3750 				} else {
3751 					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
3752 					    "==> hxge_uninit_common_dev:"
3753 					    "remove middle "
3754 					    "hw_p $%p parent dip $%p "
3755 					    "ndevs %d (middle)",
3756 					    hw_p, p_dip, hw_p->ndevs));
3757 					h_hw_p->next = hw_p->next;
3758 				}
3759 
3760 				KMEM_FREE(hw_p, sizeof (hxge_hw_list_t));
3761 			}
3762 			break;
3763 		} else {
3764 			h_hw_p = hw_p;
3765 		}
3766 	}
3767 
3768 	MUTEX_EXIT(&hxge_common_lock);
3769 	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
3770 	    "==> hxge_uninit_common_dev (hxge_hw_list) $%p", hxge_hw_list));
3771 
3772 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<= hxge_uninit_common_dev"));
3773 }
3774