1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * SunOS MT STREAMS Hydra 10Gb Ethernet Device Driver.
28  */
29 #include <hxge_impl.h>
30 #include <hxge_pfc.h>
31 
32 /*
33  * PSARC/2007/453 MSI-X interrupt limit override
34  * (This PSARC case is limited to MSI-X vectors
35  *  and SPARC platforms only).
36  */
37 #if defined(_BIG_ENDIAN)
38 uint32_t hxge_msi_enable = 2;
39 #else
40 uint32_t hxge_msi_enable = 1;
41 #endif
42 
43 /*
44  * Globals: tunable parameters (/etc/system or adb)
45  *
46  */
47 uint32_t hxge_rbr_size = HXGE_RBR_RBB_DEFAULT;
48 uint32_t hxge_rbr_spare_size = 0;
49 uint32_t hxge_rcr_size = HXGE_RCR_DEFAULT;
50 uint32_t hxge_tx_ring_size = HXGE_TX_RING_DEFAULT;
51 uint32_t hxge_bcopy_thresh = TX_BCOPY_MAX;
52 uint32_t hxge_dvma_thresh = TX_FASTDVMA_MIN;
53 uint32_t hxge_dma_stream_thresh = TX_STREAM_MIN;
54 uint32_t hxge_jumbo_frame_size = MAX_FRAME_SIZE;
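/*
 * The tunables above may be overridden in /etc/system; for example
 * (illustrative value only):
 *	set hxge:hxge_rbr_size = 2048
 */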
55 
56 static hxge_os_mutex_t hxgedebuglock;
57 static int hxge_debug_init = 0;
58 
59 /*
60  * Debugging flags:
61  *		hxge_no_tx_lb: set to 1 to disable transmit load balancing
62  *		hxge_tx_lb_policy: 0 - TCP/UDP port (default)
63  *				   1 - From the Stack
64  *				   2 - Destination IP Address
65  */
66 uint32_t hxge_no_tx_lb = 0;
67 uint32_t hxge_tx_lb_policy = HXGE_TX_LB_TCPUDP;
68 
69 /*
70  * Add tunable to reduce the amount of time spent in the
71  * ISR doing Rx Processing.
72  */
73 #if defined(__sparc)
74 uint32_t hxge_max_rx_pkts = 512;
75 #else
76 uint32_t hxge_max_rx_pkts = 1024;
77 #endif
78 
79 /*
80  * Tunables to manage the receive buffer blocks.
81  *
82  * hxge_rx_threshold_hi: copy all buffers.
83  * hxge_rx_buf_size_type: receive buffer block size type.
84  * hxge_rx_threshold_lo: copy only packets up to the tunable block size type.
85  */
86 hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_7;
87 hxge_rxbuf_type_t hxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
88 hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_3;
89 
90 rtrace_t hpi_rtracebuf;
91 
92 /*
93  * Function Prototypes
94  */
95 static int hxge_attach(dev_info_t *, ddi_attach_cmd_t);
96 static int hxge_detach(dev_info_t *, ddi_detach_cmd_t);
97 static void hxge_unattach(p_hxge_t);
98 
99 static hxge_status_t hxge_setup_system_dma_pages(p_hxge_t);
100 
101 static hxge_status_t hxge_setup_mutexes(p_hxge_t);
102 static void hxge_destroy_mutexes(p_hxge_t);
103 
104 static hxge_status_t hxge_map_regs(p_hxge_t hxgep);
105 static void hxge_unmap_regs(p_hxge_t hxgep);
106 
107 hxge_status_t hxge_add_intrs(p_hxge_t hxgep);
108 static hxge_status_t hxge_add_soft_intrs(p_hxge_t hxgep);
109 static void hxge_remove_intrs(p_hxge_t hxgep);
110 static void hxge_remove_soft_intrs(p_hxge_t hxgep);
111 static hxge_status_t hxge_add_intrs_adv(p_hxge_t hxgep);
112 static hxge_status_t hxge_add_intrs_adv_type(p_hxge_t, uint32_t);
113 static hxge_status_t hxge_add_intrs_adv_type_fix(p_hxge_t, uint32_t);
114 void hxge_intrs_enable(p_hxge_t hxgep);
115 static void hxge_intrs_disable(p_hxge_t hxgep);
116 static void hxge_suspend(p_hxge_t);
117 static hxge_status_t hxge_resume(p_hxge_t);
118 hxge_status_t hxge_setup_dev(p_hxge_t);
119 static void hxge_destroy_dev(p_hxge_t);
120 hxge_status_t hxge_alloc_mem_pool(p_hxge_t);
121 static void hxge_free_mem_pool(p_hxge_t);
122 static hxge_status_t hxge_alloc_rx_mem_pool(p_hxge_t);
123 static void hxge_free_rx_mem_pool(p_hxge_t);
124 static hxge_status_t hxge_alloc_tx_mem_pool(p_hxge_t);
125 static void hxge_free_tx_mem_pool(p_hxge_t);
126 static hxge_status_t hxge_dma_mem_alloc(p_hxge_t, dma_method_t,
127     struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
128     p_hxge_dma_common_t);
129 static void hxge_dma_mem_free(p_hxge_dma_common_t);
130 static hxge_status_t hxge_alloc_rx_buf_dma(p_hxge_t, uint16_t,
131     p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
132 static void hxge_free_rx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
133 static hxge_status_t hxge_alloc_rx_cntl_dma(p_hxge_t, uint16_t,
134     p_hxge_dma_common_t *, struct ddi_dma_attr *, size_t);
135 static void hxge_free_rx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
136 static hxge_status_t hxge_alloc_tx_buf_dma(p_hxge_t, uint16_t,
137     p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
138 static void hxge_free_tx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
139 static hxge_status_t hxge_alloc_tx_cntl_dma(p_hxge_t, uint16_t,
140     p_hxge_dma_common_t *, size_t);
141 static void hxge_free_tx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
142 static int hxge_init_common_dev(p_hxge_t);
143 static void hxge_uninit_common_dev(p_hxge_t);
144 
145 /*
146  * The next declarations are for the GLDv3 interface.
147  */
148 static int hxge_m_start(void *);
149 static void hxge_m_stop(void *);
150 static int hxge_m_unicst(void *, const uint8_t *);
151 static int hxge_m_multicst(void *, boolean_t, const uint8_t *);
152 static int hxge_m_promisc(void *, boolean_t);
153 static void hxge_m_ioctl(void *, queue_t *, mblk_t *);
154 static void hxge_m_resources(void *);
155 static hxge_status_t hxge_mac_register(p_hxge_t hxgep);
156 
157 static int hxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
158 static int hxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);
159 static int hxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
160 static int hxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);
161 static boolean_t hxge_m_getcapab(void *, mac_capab_t, void *);
162 static boolean_t hxge_param_locked(mac_prop_id_t pr_num);
163 static int hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
164     uint_t pr_valsize, const void *pr_val);
165 static int hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
166     uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *);
167 static int hxge_get_def_val(hxge_t *hxgep, mac_prop_id_t pr_num,
168     uint_t pr_valsize, void *pr_val);
169 static int hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name,
170     uint_t pr_valsize, const void *pr_val);
171 static int hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name,
172     uint_t pr_flags, uint_t pr_valsize, void *pr_val);
173 static void hxge_link_poll(void *arg);
174 static void hxge_link_update(p_hxge_t hxge, link_state_t state);
175 static void hxge_msix_init(p_hxge_t hxgep);
176 void hxge_check_msix_parity_err(p_hxge_t hxgep);
177 static uint8_t gen_32bit_parity(uint32_t data, boolean_t odd_parity);
178 
179 mac_priv_prop_t hxge_priv_props[] = {
180 	{"_rxdma_intr_time", MAC_PROP_PERM_RW},
181 	{"_rxdma_intr_pkts", MAC_PROP_PERM_RW},
182 	{"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW},
183 	{"_class_opt_ipv4_udp", MAC_PROP_PERM_RW},
184 	{"_class_opt_ipv4_ah", MAC_PROP_PERM_RW},
185 	{"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW},
186 	{"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW},
187 	{"_class_opt_ipv6_udp", MAC_PROP_PERM_RW},
188 	{"_class_opt_ipv6_ah", MAC_PROP_PERM_RW},
189 	{"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW}
190 };
191 
192 #define	HXGE_MAX_PRIV_PROPS	\
193 	(sizeof (hxge_priv_props)/sizeof (mac_priv_prop_t))
194 
195 #define	HXGE_MAGIC	0x4E584745UL
196 #define	MAX_DUMP_SZ 256
197 
198 #define	HXGE_M_CALLBACK_FLAGS	\
199 	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)
200 
201 extern mblk_t *hxge_m_tx(void *arg, mblk_t *mp);
202 extern hxge_status_t hxge_pfc_set_default_mac_addr(p_hxge_t hxgep);
203 
204 static mac_callbacks_t hxge_m_callbacks = {
205 	HXGE_M_CALLBACK_FLAGS,
206 	hxge_m_stat,
207 	hxge_m_start,
208 	hxge_m_stop,
209 	hxge_m_promisc,
210 	hxge_m_multicst,
211 	hxge_m_unicst,
212 	hxge_m_tx,
213 	hxge_m_resources,
214 	hxge_m_ioctl,
215 	hxge_m_getcapab,
216 	NULL,
217 	NULL,
218 	hxge_m_setprop,
219 	hxge_m_getprop
220 };
221 
222 /* Enable debug messages as necessary. */
223 uint64_t hxge_debug_level = 0;
224 
225 /*
226  * This list contains the instance structures for the Hydra
227  * devices present in the system. The lock exists to guarantee
228  * mutually exclusive access to the list.
229  */
230 void *hxge_list = NULL;
231 void *hxge_hw_list = NULL;
232 hxge_os_mutex_t hxge_common_lock;
233 
234 extern uint64_t hpi_debug_level;
235 
236 extern hxge_status_t hxge_ldgv_init();
237 extern hxge_status_t hxge_ldgv_uninit();
238 extern hxge_status_t hxge_intr_ldgv_init();
239 extern void hxge_fm_init(p_hxge_t hxgep, ddi_device_acc_attr_t *reg_attr,
240     ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr);
241 extern void hxge_fm_fini(p_hxge_t hxgep);
242 
243 /*
244  * Count used to maintain the number of buffers being used
245  * by Hydra instances and loaned up to the upper layers.
246  */
247 uint32_t hxge_mblks_pending = 0;
248 
249 /*
250  * Device register access attributes for PIO.
251  */
252 static ddi_device_acc_attr_t hxge_dev_reg_acc_attr = {
253 	DDI_DEVICE_ATTR_V0,
254 	DDI_STRUCTURE_LE_ACC,
255 	DDI_STRICTORDER_ACC,
256 };
257 
258 /*
259  * Device descriptor access attributes for DMA.
260  */
261 static ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr = {
262 	DDI_DEVICE_ATTR_V0,
263 	DDI_STRUCTURE_LE_ACC,
264 	DDI_STRICTORDER_ACC
265 };
266 
267 /*
268  * Device buffer access attributes for DMA.
269  */
270 static ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr = {
271 	DDI_DEVICE_ATTR_V0,
272 	DDI_STRUCTURE_BE_ACC,
273 	DDI_STRICTORDER_ACC
274 };
275 
276 ddi_dma_attr_t hxge_rx_rcr_desc_dma_attr = {
277 	DMA_ATTR_V0,		/* version number. */
278 	0,			/* low address */
279 	0xffffffffffffffff,	/* high address */
280 	0xffffffffffffffff,	/* address counter max */
281 	0x80000,		/* alignment */
282 	0xfc00fc,		/* dlim_burstsizes */
283 	0x1,			/* minimum transfer size */
284 	0xffffffffffffffff,	/* maximum transfer size */
285 	0xffffffffffffffff,	/* maximum segment size */
286 	1,			/* scatter/gather list length */
287 	(unsigned int)1,	/* granularity */
288 	0			/* attribute flags */
289 };
290 
291 ddi_dma_attr_t hxge_tx_desc_dma_attr = {
292 	DMA_ATTR_V0,		/* version number. */
293 	0,			/* low address */
294 	0xffffffffffffffff,	/* high address */
295 	0xffffffffffffffff,	/* address counter max */
296 	0x100000,		/* alignment */
297 	0xfc00fc,		/* dlim_burstsizes */
298 	0x1,			/* minimum transfer size */
299 	0xffffffffffffffff,	/* maximum transfer size */
300 	0xffffffffffffffff,	/* maximum segment size */
301 	1,			/* scatter/gather list length */
302 	(unsigned int)1,	/* granularity */
303 	0			/* attribute flags */
304 };
305 
306 ddi_dma_attr_t hxge_rx_rbr_desc_dma_attr = {
307 	DMA_ATTR_V0,		/* version number. */
308 	0,			/* low address */
309 	0xffffffffffffffff,	/* high address */
310 	0xffffffffffffffff,	/* address counter max */
311 	0x40000,		/* alignment */
312 	0xfc00fc,		/* dlim_burstsizes */
313 	0x1,			/* minimum transfer size */
314 	0xffffffffffffffff,	/* maximum transfer size */
315 	0xffffffffffffffff,	/* maximum segment size */
316 	1,			/* scatter/gather list length */
317 	(unsigned int)1,	/* granularity */
318 	0			/* attribute flags */
319 };
320 
321 ddi_dma_attr_t hxge_rx_mbox_dma_attr = {
322 	DMA_ATTR_V0,		/* version number. */
323 	0,			/* low address */
324 	0xffffffffffffffff,	/* high address */
325 	0xffffffffffffffff,	/* address counter max */
326 #if defined(_BIG_ENDIAN)
327 	0x2000,			/* alignment */
328 #else
329 	0x1000,			/* alignment */
330 #endif
331 	0xfc00fc,		/* dlim_burstsizes */
332 	0x1,			/* minimum transfer size */
333 	0xffffffffffffffff,	/* maximum transfer size */
334 	0xffffffffffffffff,	/* maximum segment size */
335 	5,			/* scatter/gather list length */
336 	(unsigned int)1,	/* granularity */
337 	0			/* attribute flags */
338 };
339 
340 ddi_dma_attr_t hxge_tx_dma_attr = {
341 	DMA_ATTR_V0,		/* version number. */
342 	0,			/* low address */
343 	0xffffffffffffffff,	/* high address */
344 	0xffffffffffffffff,	/* address counter max */
345 #if defined(_BIG_ENDIAN)
346 	0x2000,			/* alignment */
347 #else
348 	0x1000,			/* alignment */
349 #endif
350 	0xfc00fc,		/* dlim_burstsizes */
351 	0x1,			/* minimum transfer size */
352 	0xffffffffffffffff,	/* maximum transfer size */
353 	0xffffffffffffffff,	/* maximum segment size */
354 	5,			/* scatter/gather list length */
355 	(unsigned int)1,	/* granularity */
356 	0			/* attribute flags */
357 };
358 
359 ddi_dma_attr_t hxge_rx_dma_attr = {
360 	DMA_ATTR_V0,		/* version number. */
361 	0,			/* low address */
362 	0xffffffffffffffff,	/* high address */
363 	0xffffffffffffffff,	/* address counter max */
364 	0x10000,		/* alignment */
365 	0xfc00fc,		/* dlim_burstsizes */
366 	0x1,			/* minimum transfer size */
367 	0xffffffffffffffff,	/* maximum transfer size */
368 	0xffffffffffffffff,	/* maximum segment size */
369 	1,			/* scatter/gather list length */
370 	(unsigned int)1,	/* granularity */
371 	DDI_DMA_RELAXED_ORDERING /* attribute flags */
372 };
373 
374 ddi_dma_lim_t hxge_dma_limits = {
375 	(uint_t)0,		/* dlim_addr_lo */
376 	(uint_t)0xffffffff,	/* dlim_addr_hi */
377 	(uint_t)0xffffffff,	/* dlim_cntr_max */
378 	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
379 	0x1,			/* dlim_minxfer */
380 	1024			/* dlim_speed */
381 };
382 
383 dma_method_t hxge_force_dma = DVMA;
384 
385 /*
386  * DMA chunk sizes.
387  *
388  * Try to allocate the largest possible size
389  * so that fewer DMA chunks need to be managed.
390  */
391 size_t alloc_sizes[] = {
392     0x1000, 0x2000, 0x4000, 0x8000,
393     0x10000, 0x20000, 0x40000, 0x80000,
394     0x100000, 0x200000, 0x400000, 0x800000, 0x1000000
395 };
396 
397 /*
398  * hxge_attach - Device attach entry point; also handles DDI_RESUME and DDI_PM_RESUME.
399  */
400 static int
401 hxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
402 {
403 	p_hxge_t	hxgep = NULL;
404 	int		instance;
405 	int		status = DDI_SUCCESS;
406 
407 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_attach"));
408 
409 	/*
410 	 * Get the device instance since we'll need to setup or retrieve a soft
411 	 * state for this instance.
412 	 */
413 	instance = ddi_get_instance(dip);
414 
415 	switch (cmd) {
416 	case DDI_ATTACH:
417 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_ATTACH"));
418 		break;
419 
420 	case DDI_RESUME:
421 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_RESUME"));
422 		hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
423 		if (hxgep == NULL) {
424 			status = DDI_FAILURE;
425 			break;
426 		}
427 		if (hxgep->dip != dip) {
428 			status = DDI_FAILURE;
429 			break;
430 		}
431 		if (hxgep->suspended == DDI_PM_SUSPEND) {
432 			status = ddi_dev_is_needed(hxgep->dip, 0, 1);
433 		} else {
434 			(void) hxge_resume(hxgep);
435 		}
436 		goto hxge_attach_exit;
437 
438 	case DDI_PM_RESUME:
439 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_RESUME"));
440 		hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
441 		if (hxgep == NULL) {
442 			status = DDI_FAILURE;
443 			break;
444 		}
445 		if (hxgep->dip != dip) {
446 			status = DDI_FAILURE;
447 			break;
448 		}
449 		(void) hxge_resume(hxgep);
450 		goto hxge_attach_exit;
451 
452 	default:
453 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing unknown"));
454 		status = DDI_FAILURE;
455 		goto hxge_attach_exit;
456 	}
457 
458 	if (ddi_soft_state_zalloc(hxge_list, instance) == DDI_FAILURE) {
459 		status = DDI_FAILURE;
460 		HXGE_ERROR_MSG((hxgep, DDI_CTL,
461 		    "ddi_soft_state_zalloc failed"));
462 		goto hxge_attach_exit;
463 	}
464 
465 	hxgep = ddi_get_soft_state(hxge_list, instance);
466 	if (hxgep == NULL) {
467 		status = HXGE_ERROR;
468 		HXGE_ERROR_MSG((hxgep, DDI_CTL,
469 		    "ddi_get_soft_state failed"));
470 		goto hxge_attach_fail2;
471 	}
472 
473 	hxgep->drv_state = 0;
474 	hxgep->dip = dip;
475 	hxgep->instance = instance;
476 	hxgep->p_dip = ddi_get_parent(dip);
477 	hxgep->hxge_debug_level = hxge_debug_level;
478 	hpi_debug_level = hxge_debug_level;
479 
480 	hxge_fm_init(hxgep, &hxge_dev_reg_acc_attr, &hxge_dev_desc_dma_acc_attr,
481 	    &hxge_rx_dma_attr);
482 
483 	status = hxge_map_regs(hxgep);
484 	if (status != HXGE_OK) {
485 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_map_regs failed"));
486 		goto hxge_attach_fail3;
487 	}
488 
489 	/* Scrub the MSI-X memory */
490 	hxge_msix_init(hxgep);
491 
492 	status = hxge_init_common_dev(hxgep);
493 	if (status != HXGE_OK) {
494 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
495 		    "hxge_init_common_dev failed"));
496 		goto hxge_attach_fail4;
497 	}
498 
499 	/*
500 	 * Set up the NDD parameters for this instance.
501 	 */
502 	hxge_init_param(hxgep);
503 
504 	/*
505 	 * Set up the register tracing buffer.
506 	 */
507 	hpi_rtrace_buf_init((rtrace_t *)&hpi_rtracebuf);
508 
509 	/* Initialize the statistics pointer. */
510 	hxge_init_statsp(hxgep);
511 
512 	status = hxge_setup_mutexes(hxgep);
513 	if (status != HXGE_OK) {
514 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set mutex failed"));
515 		goto hxge_attach_fail;
516 	}
517 
518 	status = hxge_get_config_properties(hxgep);
519 	if (status != HXGE_OK) {
520 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "get_hw create failed"));
521 		goto hxge_attach_fail;
522 	}
523 
524 	/*
525 	 * Setup the Kstats for the driver.
526 	 */
527 	hxge_setup_kstats(hxgep);
528 	hxge_setup_param(hxgep);
529 
530 	status = hxge_setup_system_dma_pages(hxgep);
531 	if (status != HXGE_OK) {
532 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "set dma page failed"));
533 		goto hxge_attach_fail;
534 	}
535 
536 	hxge_hw_id_init(hxgep);
537 	hxge_hw_init_niu_common(hxgep);
538 
539 	status = hxge_setup_dev(hxgep);
540 	if (status != DDI_SUCCESS) {
541 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set dev failed"));
542 		goto hxge_attach_fail;
543 	}
544 
545 	status = hxge_add_intrs(hxgep);
546 	if (status != DDI_SUCCESS) {
547 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "add_intr failed"));
548 		goto hxge_attach_fail;
549 	}
550 
551 	status = hxge_add_soft_intrs(hxgep);
552 	if (status != DDI_SUCCESS) {
553 		HXGE_DEBUG_MSG((hxgep, HXGE_ERR_CTL, "add_soft_intr failed"));
554 		goto hxge_attach_fail;
555 	}
556 
557 	/*
558 	 * Enable interrupts.
559 	 */
560 	hxge_intrs_enable(hxgep);
561 
562 	if ((status = hxge_mac_register(hxgep)) != HXGE_OK) {
563 		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
564 		    "unable to register to mac layer (%d)", status));
565 		goto hxge_attach_fail;
566 	}
567 	mac_link_update(hxgep->mach, LINK_STATE_UNKNOWN);
568 
569 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "registered to mac (instance %d)",
570 	    instance));
571 
572 	goto hxge_attach_exit;
573 
574 hxge_attach_fail:
575 	hxge_unattach(hxgep);
576 	goto hxge_attach_fail1;
577 
578 hxge_attach_fail5:
579 	/*
580 	 * Tear down the ndd parameters setup.
581 	 */
582 	hxge_destroy_param(hxgep);
583 
584 	/*
585 	 * Tear down the kstat setup.
586 	 */
587 	hxge_destroy_kstats(hxgep);
588 
589 hxge_attach_fail4:
590 	if (hxgep->hxge_hw_p) {
591 		hxge_uninit_common_dev(hxgep);
592 		hxgep->hxge_hw_p = NULL;
593 	}
594 hxge_attach_fail3:
595 	/*
596 	 * Unmap the register setup.
597 	 */
598 	hxge_unmap_regs(hxgep);
599 
600 	hxge_fm_fini(hxgep);
601 
602 hxge_attach_fail2:
603 	ddi_soft_state_free(hxge_list, instance);
604 
605 hxge_attach_fail1:
606 	if (status != HXGE_OK)
607 		status = (HXGE_ERROR | HXGE_DDI_FAILED);
608 	hxgep = NULL;
609 
610 hxge_attach_exit:
611 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_attach status = 0x%08x",
612 	    status));
613 
614 	return (status);
615 }
616 
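/*
 * hxge_detach - Device detach entry point. Handles DDI_DETACH,
 * DDI_SUSPEND and DDI_PM_SUSPEND; a full detach unregisters from the
 * MAC layer and tears the instance down via hxge_unattach().
 */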
617 static int
618 hxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
619 {
620 	int		status = DDI_SUCCESS;
621 	int		instance;
622 	p_hxge_t	hxgep = NULL;
623 
624 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_detach"));
625 	instance = ddi_get_instance(dip);
626 	hxgep = ddi_get_soft_state(hxge_list, instance);
627 	if (hxgep == NULL) {
628 		status = DDI_FAILURE;
629 		goto hxge_detach_exit;
630 	}
631 
632 	switch (cmd) {
633 	case DDI_DETACH:
634 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_DETACH"));
635 		break;
636 
637 	case DDI_PM_SUSPEND:
638 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
639 		hxgep->suspended = DDI_PM_SUSPEND;
640 		hxge_suspend(hxgep);
641 		break;
642 
643 	case DDI_SUSPEND:
644 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_SUSPEND"));
645 		if (hxgep->suspended != DDI_PM_SUSPEND) {
646 			hxgep->suspended = DDI_SUSPEND;
647 			hxge_suspend(hxgep);
648 		}
649 		break;
650 
651 	default:
652 		status = DDI_FAILURE;
653 		break;
654 	}
655 
656 	if (cmd != DDI_DETACH)
657 		goto hxge_detach_exit;
658 
659 	/*
660 	 * Stop the xcvr polling; setting 'suspended' prevents further timeouts.
661 	 */
662 	hxgep->suspended = cmd;
663 
664 	if (hxgep->mach && (status = mac_unregister(hxgep->mach)) != 0) {
665 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
666 		    "<== hxge_detach status = 0x%08X", status));
667 		return (DDI_FAILURE);
668 	}
669 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
670 	    "<== hxge_detach (mac_unregister) status = 0x%08X", status));
671 
672 	hxge_unattach(hxgep);
673 	hxgep = NULL;
674 
675 hxge_detach_exit:
676 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_detach status = 0x%08X",
677 	    status));
678 
679 	return (status);
680 }
681 
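/*
 * hxge_unattach - Release all resources held by this instance: timers,
 * interrupts, the device state, NDD parameters, kstats, register
 * mappings, mutexes and finally the soft state itself.
 */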
682 static void
683 hxge_unattach(p_hxge_t hxgep)
684 {
685 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unattach"));
686 
687 	if (hxgep == NULL || hxgep->dev_regs == NULL) {
688 		return;
689 	}
690 
691 	if (hxgep->hxge_hw_p) {
692 		hxge_uninit_common_dev(hxgep);
693 		hxgep->hxge_hw_p = NULL;
694 	}
695 
696 	if (hxgep->hxge_timerid) {
697 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
698 		hxgep->hxge_timerid = 0;
699 	}
700 
701 	/* Stop any further interrupts. */
702 	hxge_remove_intrs(hxgep);
703 
704 	/* Remove soft interrupts. */
705 	hxge_remove_soft_intrs(hxgep);
706 
707 	/* Stop the device and free resources. */
708 	hxge_destroy_dev(hxgep);
709 
710 	/* Tear down the ndd parameters setup. */
711 	hxge_destroy_param(hxgep);
712 
713 	/* Tear down the kstat setup. */
714 	hxge_destroy_kstats(hxgep);
715 
716 	/*
717 	 * Remove the list of ndd parameters which were setup during attach.
718 	 */
719 	if (hxgep->dip) {
720 		HXGE_DEBUG_MSG((hxgep, OBP_CTL,
721 		    " hxge_unattach: remove all properties"));
722 		(void) ddi_prop_remove_all(hxgep->dip);
723 	}
724 
725 	/*
726 	 * Reset RDC, TDC, PFC, and VMAC blocks from PEU to clear any
727 	 * previous state before unmapping the registers.
728 	 */
729 	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, 0x0000001E);
730 	HXGE_DELAY(1000);
731 
732 	/*
733 	 * Unmap the register setup.
734 	 */
735 	hxge_unmap_regs(hxgep);
736 
737 	hxge_fm_fini(hxgep);
738 
739 	/* Destroy all mutexes.  */
740 	hxge_destroy_mutexes(hxgep);
741 
742 	/*
743 	 * Free the soft state data structures allocated with this instance.
744 	 */
745 	ddi_soft_state_free(hxge_list, hxgep->instance);
746 
747 	HXGE_DEBUG_MSG((NULL, DDI_CTL, "<== hxge_unattach"));
748 }
749 
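/*
 * hxge_map_regs - Map the three register sets (PCI config space, the
 * device PIO registers and the MSI/MSI-X table) and record the access
 * handles.
 */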
750 static hxge_status_t
751 hxge_map_regs(p_hxge_t hxgep)
752 {
753 	int		ddi_status = DDI_SUCCESS;
754 	p_dev_regs_t	dev_regs;
755 
756 #ifdef	HXGE_DEBUG
757 	char		*sysname;
758 #endif
759 
760 	off_t		regsize;
761 	hxge_status_t	status = HXGE_OK;
762 	int		nregs;
763 
764 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_map_regs"));
765 
766 	if (ddi_dev_nregs(hxgep->dip, &nregs) != DDI_SUCCESS)
767 		return (HXGE_ERROR);
768 
769 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_regs: nregs: %d", nregs));
770 
771 	hxgep->dev_regs = NULL;
772 	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
773 	dev_regs->hxge_regh = NULL;
774 	dev_regs->hxge_pciregh = NULL;
775 	dev_regs->hxge_msix_regh = NULL;
776 
777 	(void) ddi_dev_regsize(hxgep->dip, 0, &regsize);
778 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
779 	    "hxge_map_regs: pci config size 0x%x", regsize));
780 
781 	ddi_status = ddi_regs_map_setup(hxgep->dip, 0,
782 	    (caddr_t *)&(dev_regs->hxge_pciregp), 0, 0,
783 	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_pciregh);
784 	if (ddi_status != DDI_SUCCESS) {
785 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
786 		    "ddi_map_regs, hxge bus config regs failed"));
787 		goto hxge_map_regs_fail0;
788 	}
789 
790 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
791 	    "hxge_map_reg: PCI config addr 0x%0llx handle 0x%0llx",
792 	    dev_regs->hxge_pciregp,
793 	    dev_regs->hxge_pciregh));
794 
795 	(void) ddi_dev_regsize(hxgep->dip, 1, &regsize);
796 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
797 	    "hxge_map_regs: pio size 0x%x", regsize));
798 
799 	/* set up the device mapped register */
800 	ddi_status = ddi_regs_map_setup(hxgep->dip, 1,
801 	    (caddr_t *)&(dev_regs->hxge_regp), 0, 0,
802 	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_regh);
803 
804 	if (ddi_status != DDI_SUCCESS) {
805 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
806 		    "ddi_map_regs for Hydra global reg failed"));
807 		goto hxge_map_regs_fail1;
808 	}
809 
810 	/* set up the msi/msi-x mapped register */
811 	(void) ddi_dev_regsize(hxgep->dip, 2, &regsize);
812 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
813 	    "hxge_map_regs: msix size 0x%x", regsize));
814 
815 	ddi_status = ddi_regs_map_setup(hxgep->dip, 2,
816 	    (caddr_t *)&(dev_regs->hxge_msix_regp), 0, 0,
817 	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_msix_regh);
818 
819 	if (ddi_status != DDI_SUCCESS) {
820 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
821 		    "ddi_map_regs for msi reg failed"));
822 		goto hxge_map_regs_fail2;
823 	}
824 
825 	hxgep->dev_regs = dev_regs;
826 
827 	HPI_PCI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_pciregh);
828 	HPI_PCI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_pciregp);
829 	HPI_MSI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_msix_regh);
830 	HPI_MSI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_msix_regp);
831 
832 	HPI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
833 	HPI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
834 
835 	HPI_REG_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
836 	HPI_REG_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
837 
838 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_reg: hardware addr 0x%0llx "
839 	    " handle 0x%0llx", dev_regs->hxge_regp, dev_regs->hxge_regh));
840 
841 	goto hxge_map_regs_exit;
842 
843 hxge_map_regs_fail3:
844 	if (dev_regs->hxge_msix_regh) {
845 		ddi_regs_map_free(&dev_regs->hxge_msix_regh);
846 	}
847 
848 hxge_map_regs_fail2:
849 	if (dev_regs->hxge_regh) {
850 		ddi_regs_map_free(&dev_regs->hxge_regh);
851 	}
852 
853 hxge_map_regs_fail1:
854 	if (dev_regs->hxge_pciregh) {
855 		ddi_regs_map_free(&dev_regs->hxge_pciregh);
856 	}
857 
858 hxge_map_regs_fail0:
859 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "Freeing register set memory"));
860 	kmem_free(dev_regs, sizeof (dev_regs_t));
861 
862 hxge_map_regs_exit:
863 	if (ddi_status != DDI_SUCCESS)
864 		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
865 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_map_regs"));
866 	return (status);
867 }
868 
869 static void
870 hxge_unmap_regs(p_hxge_t hxgep)
871 {
872 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unmap_regs"));
873 	if (hxgep->dev_regs) {
874 		if (hxgep->dev_regs->hxge_pciregh) {
875 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
876 			    "==> hxge_unmap_regs: bus"));
877 			ddi_regs_map_free(&hxgep->dev_regs->hxge_pciregh);
878 			hxgep->dev_regs->hxge_pciregh = NULL;
879 		}
880 
881 		if (hxgep->dev_regs->hxge_regh) {
882 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
883 			    "==> hxge_unmap_regs: device registers"));
884 			ddi_regs_map_free(&hxgep->dev_regs->hxge_regh);
885 			hxgep->dev_regs->hxge_regh = NULL;
886 		}
887 
888 		if (hxgep->dev_regs->hxge_msix_regh) {
889 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
890 			    "==> hxge_unmap_regs: device interrupts"));
891 			ddi_regs_map_free(&hxgep->dev_regs->hxge_msix_regh);
892 			hxgep->dev_regs->hxge_msix_regh = NULL;
893 		}
894 		kmem_free(hxgep->dev_regs, sizeof (dev_regs_t));
895 		hxgep->dev_regs = NULL;
896 	}
897 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_unmap_regs"));
898 }
899 
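/*
 * hxge_setup_mutexes - Fetch the interrupt block cookie and initialize
 * the per-instance locks with it.
 */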
900 static hxge_status_t
901 hxge_setup_mutexes(p_hxge_t hxgep)
902 {
903 	int		ddi_status = DDI_SUCCESS;
904 	hxge_status_t	status = HXGE_OK;
905 
906 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_mutexes"));
907 
908 	/*
909 	 * Get the interrupt cookie so the mutexes can be initialized.
910 	 */
911 	ddi_status = ddi_get_iblock_cookie(hxgep->dip, 0,
912 	    &hxgep->interrupt_cookie);
913 
914 	if (ddi_status != DDI_SUCCESS) {
915 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
916 		    "<== hxge_setup_mutexes: failed 0x%x", ddi_status));
917 		goto hxge_setup_mutexes_exit;
918 	}
919 
920 	/*
921 	 * Initialize mutexes for this device.
922 	 */
923 	MUTEX_INIT(hxgep->genlock, NULL,
924 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
925 	MUTEX_INIT(&hxgep->ouraddr_lock, NULL,
926 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
927 	RW_INIT(&hxgep->filter_lock, NULL,
928 	    RW_DRIVER, (void *) hxgep->interrupt_cookie);
929 	MUTEX_INIT(&hxgep->pio_lock, NULL,
930 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
931 	MUTEX_INIT(&hxgep->timeout.lock, NULL,
932 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
933 
934 hxge_setup_mutexes_exit:
935 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
936 	    "<== hxge_setup_mutexes status = %x", status));
937 
938 	if (ddi_status != DDI_SUCCESS)
939 		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
940 
941 	return (status);
942 }
943 
944 static void
945 hxge_destroy_mutexes(p_hxge_t hxgep)
946 {
947 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_mutexes"));
948 	RW_DESTROY(&hxgep->filter_lock);
949 	MUTEX_DESTROY(&hxgep->ouraddr_lock);
950 	MUTEX_DESTROY(hxgep->genlock);
951 	MUTEX_DESTROY(&hxgep->pio_lock);
952 	MUTEX_DESTROY(&hxgep->timeout.lock);
953 
954 	if (hxge_debug_init == 1) {
955 		MUTEX_DESTROY(&hxgedebuglock);
956 		hxge_debug_init = 0;
957 	}
958 
959 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_mutexes"));
960 }
961 
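/*
 * hxge_init - Bring the hardware to an initialized state: allocate the
 * DMA memory pools, enable the TX/RX DMA channels, set up
 * classification (TCAM) and the VMAC, and enable interrupts.
 */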
962 hxge_status_t
963 hxge_init(p_hxge_t hxgep)
964 {
965 	hxge_status_t status = HXGE_OK;
966 
967 	HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_init"));
968 
969 	if (hxgep->drv_state & STATE_HW_INITIALIZED) {
970 		return (status);
971 	}
972 
973 	/*
974 	 * Allocate system memory for the receive/transmit buffer blocks and
975 	 * receive/transmit descriptor rings.
976 	 */
977 	status = hxge_alloc_mem_pool(hxgep);
978 	if (status != HXGE_OK) {
979 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "alloc mem failed\n"));
980 		goto hxge_init_fail1;
981 	}
982 
983 	/*
984 	 * Initialize and enable TXDMA channels.
985 	 */
986 	status = hxge_init_txdma_channels(hxgep);
987 	if (status != HXGE_OK) {
988 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init txdma failed\n"));
989 		goto hxge_init_fail3;
990 	}
991 
992 	/*
993 	 * Initialize and enable RXDMA channels.
994 	 */
995 	status = hxge_init_rxdma_channels(hxgep);
996 	if (status != HXGE_OK) {
997 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init rxdma failed\n"));
998 		goto hxge_init_fail4;
999 	}
1000 
1001 	/*
1002 	 * Initialize TCAM
1003 	 */
1004 	status = hxge_classify_init(hxgep);
1005 	if (status != HXGE_OK) {
1006 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init classify failed\n"));
1007 		goto hxge_init_fail5;
1008 	}
1009 
1010 	/*
1011 	 * Initialize the VMAC block.
1012 	 */
1013 	status = hxge_vmac_init(hxgep);
1014 	if (status != HXGE_OK) {
1015 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init MAC failed\n"));
1016 		goto hxge_init_fail5;
1017 	}
1018 
1019 	/* Bring-up: this may be unnecessary once PXE and FCODE are available. */
1020 	status = hxge_pfc_set_default_mac_addr(hxgep);
1021 	if (status != HXGE_OK) {
1022 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1023 		    "Default Address Failure\n"));
1024 		goto hxge_init_fail5;
1025 	}
1026 
1027 	hxge_intrs_enable(hxgep);
1028 
1029 	/*
1030 	 * Enable hardware interrupts.
1031 	 */
1032 	hxge_intr_hw_enable(hxgep);
1033 	hxgep->drv_state |= STATE_HW_INITIALIZED;
1034 
1035 	goto hxge_init_exit;
1036 
1037 hxge_init_fail5:
1038 	hxge_uninit_rxdma_channels(hxgep);
1039 hxge_init_fail4:
1040 	hxge_uninit_txdma_channels(hxgep);
1041 hxge_init_fail3:
1042 	hxge_free_mem_pool(hxgep);
1043 hxge_init_fail1:
1044 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1045 	    "<== hxge_init status (failed) = 0x%08x", status));
1046 	return (status);
1047 
1048 hxge_init_exit:
1049 
1050 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_init status = 0x%08x",
1051 	    status));
1052 
1053 	return (status);
1054 }
1055 
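/*
 * hxge_start_timer - Schedule 'func' to run in 'msec' milliseconds,
 * unless the instance is suspended.
 */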
1056 timeout_id_t
1057 hxge_start_timer(p_hxge_t hxgep, fptrv_t func, int msec)
1058 {
1059 	if ((hxgep->suspended == 0) || (hxgep->suspended == DDI_RESUME)) {
1060 		return (timeout(func, (caddr_t)hxgep,
1061 		    drv_usectohz(1000 * msec)));
1062 	}
1063 	return (NULL);
1064 }
1065 
1066 /*ARGSUSED*/
1067 void
1068 hxge_stop_timer(p_hxge_t hxgep, timeout_id_t timerid)
1069 {
1070 	if (timerid) {
1071 		(void) untimeout(timerid);
1072 	}
1073 }
1074 
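/*
 * hxge_uninit - Undo hxge_init: disable interrupts, quiesce the VMAC
 * and the TX/RX DMA channels, release classification resources and
 * free the DMA memory pools.
 */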
1075 void
1076 hxge_uninit(p_hxge_t hxgep)
1077 {
1078 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_uninit"));
1079 
1080 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
1081 		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1082 		    "==> hxge_uninit: not initialized"));
1083 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
1084 		return;
1085 	}
1086 
1087 	/* Stop timer */
1088 	if (hxgep->hxge_timerid) {
1089 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
1090 		hxgep->hxge_timerid = 0;
1091 	}
1092 
1093 	(void) hxge_intr_hw_disable(hxgep);
1094 
1095 	/* Reset the receive VMAC side.  */
1096 	(void) hxge_rx_vmac_disable(hxgep);
1097 
1098 	/* Free classification resources */
1099 	(void) hxge_classify_uninit(hxgep);
1100 
1101 	/* Reset the transmit/receive DMA side.  */
1102 	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
1103 	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);
1104 
1105 	hxge_uninit_txdma_channels(hxgep);
1106 	hxge_uninit_rxdma_channels(hxgep);
1107 
1108 	/* Reset the transmit VMAC side.  */
1109 	(void) hxge_tx_vmac_disable(hxgep);
1110 
1111 	hxge_free_mem_pool(hxgep);
1112 
1113 	hxgep->drv_state &= ~STATE_HW_INITIALIZED;
1114 
1115 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
1116 }
1117 
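/*
 * hxge_get64 - Read a 64-bit register for the ioctl path: the register
 * offset arrives in the mblk and the value read is copied back in place.
 */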
1118 void
1119 hxge_get64(p_hxge_t hxgep, p_mblk_t mp)
1120 {
1121 #if defined(__i386)
1122 	size_t		reg;
1123 #else
1124 	uint64_t	reg;
1125 #endif
1126 	uint64_t	regdata;
1127 	int		i, retry;
1128 
1129 	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
1130 	regdata = 0;
1131 	retry = 1;
1132 
1133 	for (i = 0; i < retry; i++) {
1134 		HXGE_REG_RD64(hxgep->hpi_handle, reg, &regdata);
1135 	}
1136 	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
1137 }
1138 
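/*
 * hxge_put64 - Write a 64-bit register for the ioctl path: the mblk
 * carries the register offset followed by the value to write.
 */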
1139 void
1140 hxge_put64(p_hxge_t hxgep, p_mblk_t mp)
1141 {
1142 #if defined(__i386)
1143 	size_t		reg;
1144 #else
1145 	uint64_t	reg;
1146 #endif
1147 	uint64_t	buf[2];
1148 
1149 	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
1150 #if defined(__i386)
1151 	reg = (size_t)buf[0];
1152 #else
1153 	reg = buf[0];
1154 #endif
1155 
1156 	HXGE_HPI_PIO_WRITE64(hxgep->hpi_handle, reg, buf[1]);
1157 }
1158 
1159 /*ARGSUSED*/
1160 /*VARARGS*/
1161 void
1162 hxge_debug_msg(p_hxge_t hxgep, uint64_t level, char *fmt, ...)
1163 {
1164 	char		msg_buffer[1048];
1165 	char		prefix_buffer[32];
1166 	int		instance;
1167 	uint64_t	debug_level;
1168 	int		cmn_level = CE_CONT;
1169 	va_list		ap;
1170 
1171 	debug_level = (hxgep == NULL) ? hxge_debug_level :
1172 	    hxgep->hxge_debug_level;
1173 
1174 	if ((level & debug_level) || (level == HXGE_NOTE) ||
1175 	    (level == HXGE_ERR_CTL)) {
1176 		/* do the msg processing */
1177 		if (hxge_debug_init == 0) {
1178 			MUTEX_INIT(&hxgedebuglock, NULL, MUTEX_DRIVER, NULL);
1179 			hxge_debug_init = 1;
1180 		}
1181 
1182 		MUTEX_ENTER(&hxgedebuglock);
1183 
1184 		if ((level & HXGE_NOTE)) {
1185 			cmn_level = CE_NOTE;
1186 		}
1187 
1188 		if (level & HXGE_ERR_CTL) {
1189 			cmn_level = CE_WARN;
1190 		}
1191 
1192 		va_start(ap, fmt);
1193 		(void) vsprintf(msg_buffer, fmt, ap);
1194 		va_end(ap);
1195 
1196 		if (hxgep == NULL) {
1197 			instance = -1;
1198 			(void) sprintf(prefix_buffer, "%s :", "hxge");
1199 		} else {
1200 			instance = hxgep->instance;
1201 			(void) sprintf(prefix_buffer,
1202 			    "%s%d :", "hxge", instance);
1203 		}
1204 
1205 		MUTEX_EXIT(&hxgedebuglock);
1206 		cmn_err(cmn_level, "%s %s\n", prefix_buffer, msg_buffer);
1207 	}
1208 }
1209 
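/*
 * hxge_dump_packet - Format up to MAX_DUMP_SZ bytes of a packet as a
 * colon-separated hex string for debugging; longer packets are dumped
 * as leading and trailing halves.
 */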
1210 char *
1211 hxge_dump_packet(char *addr, int size)
1212 {
1213 	uchar_t		*ap = (uchar_t *)addr;
1214 	int		i;
1215 	static char	etherbuf[1024];
1216 	char		*cp = etherbuf;
1217 	char		digits[] = "0123456789abcdef";
1218 
1219 	if (!size)
1220 		size = 60;
1221 
1222 	if (size > MAX_DUMP_SZ) {
1223 		/* Dump the leading bytes */
1224 		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1225 			if (*ap > 0x0f)
1226 				*cp++ = digits[*ap >> 4];
1227 			*cp++ = digits[*ap++ & 0xf];
1228 			*cp++ = ':';
1229 		}
1230 		for (i = 0; i < 20; i++)
1231 			*cp++ = '.';
1232 		/* Dump the last MAX_DUMP_SZ/2 bytes */
1233 		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
1234 		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1235 			if (*ap > 0x0f)
1236 				*cp++ = digits[*ap >> 4];
1237 			*cp++ = digits[*ap++ & 0xf];
1238 			*cp++ = ':';
1239 		}
1240 	} else {
1241 		for (i = 0; i < size; i++) {
1242 			if (*ap > 0x0f)
1243 				*cp++ = digits[*ap >> 4];
1244 			*cp++ = digits[*ap++ & 0xf];
1245 			*cp++ = ':';
1246 		}
1247 	}
1248 	*--cp = 0;
1249 	return (etherbuf);
1250 }
1251 
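/*
 * hxge_suspend - Stop the link status timer, disable interrupts and
 * stop the hardware in preparation for DDI_SUSPEND/DDI_PM_SUSPEND.
 */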
1252 static void
1253 hxge_suspend(p_hxge_t hxgep)
1254 {
1255 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_suspend"));
1256 
1257 	/*
1258 	 * Stop the link status timer before hxge_intrs_disable() to avoid
1259 	 * accessing the MSIX table simultaneously. Note that the timer
1260 	 * routine polls for MSIX parity errors.
1261 	 */
1262 	MUTEX_ENTER(&hxgep->timeout.lock);
1263 	if (hxgep->timeout.id)
1264 		(void) untimeout(hxgep->timeout.id);
1265 	MUTEX_EXIT(&hxgep->timeout.lock);
1266 
1267 	hxge_intrs_disable(hxgep);
1268 	hxge_destroy_dev(hxgep);
1269 
1270 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_suspend"));
1271 }
1272 
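/*
 * hxge_resume - Restart the DMA channels, the VMAC, interrupts and the
 * link status timer after a suspend.
 */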
1273 static hxge_status_t
1274 hxge_resume(p_hxge_t hxgep)
1275 {
1276 	hxge_status_t status = HXGE_OK;
1277 
1278 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_resume"));
1279 	hxgep->suspended = DDI_RESUME;
1280 
1281 	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_START);
1282 	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_START);
1283 
1284 	(void) hxge_rx_vmac_enable(hxgep);
1285 	(void) hxge_tx_vmac_enable(hxgep);
1286 
1287 	hxge_intrs_enable(hxgep);
1288 
1289 	hxgep->suspended = 0;
1290 
1291 	/*
1292 	 * Resume the link status timer after hxge_intrs_enable to avoid
1293 	 * accessing MSIX table simultaneously.
1294 	 */
1295 	MUTEX_ENTER(&hxgep->timeout.lock);
1296 	hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
1297 	    hxgep->timeout.ticks);
1298 	MUTEX_EXIT(&hxgep->timeout.lock);
1299 
1300 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1301 	    "<== hxge_resume status = 0x%x", status));
1302 
1303 	return (status);
1304 }
1305 
1306 hxge_status_t
1307 hxge_setup_dev(p_hxge_t hxgep)
1308 {
1309 	hxge_status_t status = HXGE_OK;
1310 
1311 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_dev"));
1312 
1313 	status = hxge_link_init(hxgep);
1314 	if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) {
1315 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1316 		    "Bad register acc handle"));
1317 		status = HXGE_ERROR;
1318 	}
1319 
1320 	if (status != HXGE_OK) {
1321 		HXGE_DEBUG_MSG((hxgep, MAC_CTL,
1322 		    " hxge_setup_dev status (link init 0x%08x)", status));
1323 		goto hxge_setup_dev_exit;
1324 	}
1325 
1326 hxge_setup_dev_exit:
1327 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1328 	    "<== hxge_setup_dev status = 0x%08x", status));
1329 
1330 	return (status);
1331 }
1332 
1333 static void
1334 hxge_destroy_dev(p_hxge_t hxgep)
1335 {
1336 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_dev"));
1337 
1338 	(void) hxge_hw_stop(hxgep);
1339 
1340 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_dev"));
1341 }
1342 
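/*
 * hxge_setup_system_dma_pages - Determine the system (and IOMMU) page
 * size, derive the receive block size from it, and probe the system
 * DMA burst sizes with a temporarily bound spare handle.
 */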
1343 static hxge_status_t
1344 hxge_setup_system_dma_pages(p_hxge_t hxgep)
1345 {
1346 	int			ddi_status = DDI_SUCCESS;
1347 	uint_t			count;
1348 	ddi_dma_cookie_t	cookie;
1349 	uint_t			iommu_pagesize;
1350 	hxge_status_t		status = HXGE_OK;
1351 
1352 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_system_dma_pages"));
1353 
1354 	hxgep->sys_page_sz = ddi_ptob(hxgep->dip, (ulong_t)1);
1355 	iommu_pagesize = dvma_pagesize(hxgep->dip);
1356 
1357 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1358 	    " hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1359 	    " default_block_size %d iommu_pagesize %d",
1360 	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1361 	    hxgep->rx_default_block_size, iommu_pagesize));
1362 
1363 	if (iommu_pagesize != 0) {
1364 		if (hxgep->sys_page_sz == iommu_pagesize) {
1365 			/* Hydra supports pages of up to 8K. */
1366 			if (iommu_pagesize > 0x2000)
1367 				hxgep->sys_page_sz = 0x2000;
1368 		} else {
1369 			if (hxgep->sys_page_sz > iommu_pagesize)
1370 				hxgep->sys_page_sz = iommu_pagesize;
1371 		}
1372 	}
1373 
1374 	hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1375 
1376 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1377 	    "==> hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1378 	    "default_block_size %d page mask %d",
1379 	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1380 	    hxgep->rx_default_block_size, hxgep->sys_page_mask));
1381 
1382 	switch (hxgep->sys_page_sz) {
1383 	default:
1384 		hxgep->sys_page_sz = 0x1000;
1385 		hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1386 		hxgep->rx_default_block_size = 0x1000;
1387 		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1388 		break;
1389 	case 0x1000:
1390 		hxgep->rx_default_block_size = 0x1000;
1391 		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1392 		break;
1393 	case 0x2000:
1394 		hxgep->rx_default_block_size = 0x2000;
1395 		hxgep->rx_bksize_code = RBR_BKSIZE_8K;
1396 		break;
1397 	}
1398 
1399 	hxge_rx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1400 	hxge_tx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1401 
1402 	/*
1403 	 * Get the system DMA burst size.
1404 	 */
1405 	ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr,
1406 	    DDI_DMA_DONTWAIT, 0, &hxgep->dmasparehandle);
1407 	if (ddi_status != DDI_SUCCESS) {
1408 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1409 		    "ddi_dma_alloc_handle: failed status 0x%x", ddi_status));
1410 		goto hxge_get_soft_properties_exit;
1411 	}
1412 
1413 	ddi_status = ddi_dma_addr_bind_handle(hxgep->dmasparehandle, NULL,
1414 	    (caddr_t)hxgep->dmasparehandle, sizeof (hxgep->dmasparehandle),
1415 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
1416 	    &cookie, &count);
1417 	if (ddi_status != DDI_DMA_MAPPED) {
1418 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1419 		    "Binding spare handle to find system burstsize failed."));
1420 		ddi_status = DDI_FAILURE;
1421 		goto hxge_get_soft_properties_fail1;
1422 	}
1423 
1424 	hxgep->sys_burst_sz = ddi_dma_burstsizes(hxgep->dmasparehandle);
1425 	(void) ddi_dma_unbind_handle(hxgep->dmasparehandle);
1426 
1427 hxge_get_soft_properties_fail1:
1428 	ddi_dma_free_handle(&hxgep->dmasparehandle);
1429 
1430 hxge_get_soft_properties_exit:
1431 
1432 	if (ddi_status != DDI_SUCCESS)
1433 		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
1434 
1435 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1436 	    "<== hxge_setup_system_dma_pages status = 0x%08x", status));
1437 
1438 	return (status);
1439 }
1440 
1441 hxge_status_t
1442 hxge_alloc_mem_pool(p_hxge_t hxgep)
1443 {
1444 	hxge_status_t status = HXGE_OK;
1445 
1446 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_alloc_mem_pool"));
1447 
1448 	status = hxge_alloc_rx_mem_pool(hxgep);
1449 	if (status != HXGE_OK) {
1450 		return (HXGE_ERROR);
1451 	}
1452 
1453 	status = hxge_alloc_tx_mem_pool(hxgep);
1454 	if (status != HXGE_OK) {
1455 		hxge_free_rx_mem_pool(hxgep);
1456 		return (HXGE_ERROR);
1457 	}
1458 
1459 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_alloc_mem_pool"));
1460 	return (HXGE_OK);
1461 }
1462 
1463 static void
1464 hxge_free_mem_pool(p_hxge_t hxgep)
1465 {
1466 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_mem_pool"));
1467 
1468 	hxge_free_rx_mem_pool(hxgep);
1469 	hxge_free_tx_mem_pool(hxgep);
1470 
1471 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_mem_pool"));
1472 }
1473 
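/*
 * hxge_alloc_rx_mem_pool - Allocate, for every receive DMA channel, the
 * receive buffer blocks plus the RBR, RCR and mailbox control areas,
 * and hang the resulting pools off the instance.
 */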
1474 static hxge_status_t
1475 hxge_alloc_rx_mem_pool(p_hxge_t hxgep)
1476 {
1477 	int			i, j;
1478 	uint32_t		ndmas, st_rdc;
1479 	p_hxge_dma_pt_cfg_t	p_all_cfgp;
1480 	p_hxge_hw_pt_cfg_t	p_cfgp;
1481 	p_hxge_dma_pool_t	dma_poolp;
1482 	p_hxge_dma_common_t	*dma_buf_p;
1483 	p_hxge_dma_pool_t	dma_rbr_cntl_poolp;
1484 	p_hxge_dma_common_t	*dma_rbr_cntl_p;
1485 	p_hxge_dma_pool_t	dma_rcr_cntl_poolp;
1486 	p_hxge_dma_common_t	*dma_rcr_cntl_p;
1487 	p_hxge_dma_pool_t	dma_mbox_cntl_poolp;
1488 	p_hxge_dma_common_t	*dma_mbox_cntl_p;
1489 	size_t			rx_buf_alloc_size;
1490 	size_t			rx_rbr_cntl_alloc_size;
1491 	size_t			rx_rcr_cntl_alloc_size;
1492 	size_t			rx_mbox_cntl_alloc_size;
1493 	uint32_t		*num_chunks;	/* per dma */
1494 	hxge_status_t		status = HXGE_OK;
1495 
1496 	uint32_t		hxge_port_rbr_size;
1497 	uint32_t		hxge_port_rbr_spare_size;
1498 	uint32_t		hxge_port_rcr_size;
1499 
1500 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_mem_pool"));
1501 
1502 	p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1503 	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1504 	st_rdc = p_cfgp->start_rdc;
1505 	ndmas = p_cfgp->max_rdcs;
1506 
1507 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1508 	    " hxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));
1509 
1510 	/*
1511 	 * Allocate memory for each receive DMA channel.
1512 	 */
1513 	dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1514 	    KM_SLEEP);
1515 	dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1516 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1517 
1518 	dma_rbr_cntl_poolp = (p_hxge_dma_pool_t)
1519 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1520 	dma_rbr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1521 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1522 	dma_rcr_cntl_poolp = (p_hxge_dma_pool_t)
1523 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1524 	dma_rcr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1525 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1526 	dma_mbox_cntl_poolp = (p_hxge_dma_pool_t)
1527 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1528 	dma_mbox_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1529 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1530 
1531 	num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
1532 	    KM_SLEEP);
1533 
1534 	/*
1535 	 * Assume that each DMA channel will be configured with default block
1536 	 * size. RBR block counts are rounded up to a multiple of the batch count (16).
1537 	 */
1538 	hxge_port_rbr_size = p_all_cfgp->rbr_size;
1539 	hxge_port_rcr_size = p_all_cfgp->rcr_size;
1540 
1541 	if (!hxge_port_rbr_size) {
1542 		hxge_port_rbr_size = HXGE_RBR_RBB_DEFAULT;
1543 	}
1544 
1545 	if (hxge_port_rbr_size % HXGE_RXDMA_POST_BATCH) {
1546 		hxge_port_rbr_size = (HXGE_RXDMA_POST_BATCH *
1547 		    (hxge_port_rbr_size / HXGE_RXDMA_POST_BATCH + 1));
1548 	}
1549 
1550 	p_all_cfgp->rbr_size = hxge_port_rbr_size;
1551 	hxge_port_rbr_spare_size = hxge_rbr_spare_size;
1552 
1553 	if (hxge_port_rbr_spare_size % HXGE_RXDMA_POST_BATCH) {
1554 		hxge_port_rbr_spare_size = (HXGE_RXDMA_POST_BATCH *
1555 		    (hxge_port_rbr_spare_size / HXGE_RXDMA_POST_BATCH + 1));
1556 	}
1557 
1558 	rx_buf_alloc_size = (hxgep->rx_default_block_size *
1559 	    (hxge_port_rbr_size + hxge_port_rbr_spare_size));
1560 
1561 	/*
1562 	 * Addresses of receive block ring, receive completion ring and the
1563 	 * mailbox must be all cache-aligned (64 bytes).
1564 	 */
1565 	rx_rbr_cntl_alloc_size = hxge_port_rbr_size + hxge_port_rbr_spare_size;
1566 	rx_rbr_cntl_alloc_size *= sizeof (rx_desc_t);
1567 	rx_rcr_cntl_alloc_size = sizeof (rcr_entry_t) * hxge_port_rcr_size;
1568 	rx_mbox_cntl_alloc_size = sizeof (rxdma_mailbox_t);
1569 
1570 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_alloc_rx_mem_pool: "
1571 	    "hxge_port_rbr_size = %d hxge_port_rbr_spare_size = %d "
1572 	    "hxge_port_rcr_size = %d rx_rbr_cntl_alloc_size = %d",
1573 	    hxge_port_rbr_size, hxge_port_rbr_spare_size,
1574 	    hxge_port_rcr_size, rx_rbr_cntl_alloc_size));
1575 
1576 	hxgep->hxge_port_rbr_size = hxge_port_rbr_size;
1577 	hxgep->hxge_port_rcr_size = hxge_port_rcr_size;
1578 
1579 	/*
1580 	 * Allocate memory for receive buffers and descriptor rings. Replace
1581 	 * allocation functions with interface functions provided by the
1582 	 * partition manager when it is available.
1583 	 */
1584 	/*
1585 	 * Allocate memory for the receive buffer blocks.
1586 	 */
1587 	for (i = 0; i < ndmas; i++) {
1588 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1589 		    " hxge_alloc_rx_mem_pool to alloc mem: "
1590 		    " dma %d dma_buf_p %llx &dma_buf_p %llx",
1591 		    i, dma_buf_p[i], &dma_buf_p[i]));
1592 
1593 		num_chunks[i] = 0;
1594 
1595 		status = hxge_alloc_rx_buf_dma(hxgep, st_rdc, &dma_buf_p[i],
1596 		    rx_buf_alloc_size, hxgep->rx_default_block_size,
1597 		    &num_chunks[i]);
1598 		if (status != HXGE_OK) {
1599 			break;
1600 		}
1601 
1602 		st_rdc++;
1603 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1604 		    " hxge_alloc_rx_mem_pool DONE  alloc mem: "
1605 		    "dma %d dma_buf_p %llx &dma_buf_p %llx", i,
1606 		    dma_buf_p[i], &dma_buf_p[i]));
1607 	}
1608 
1609 	if (i < ndmas) {
1610 		goto hxge_alloc_rx_mem_fail1;
1611 	}
1612 
1613 	/*
1614 	 * Allocate memory for descriptor rings and mailbox.
1615 	 */
1616 	st_rdc = p_cfgp->start_rdc;
1617 	for (j = 0; j < ndmas; j++) {
1618 		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1619 		    &dma_rbr_cntl_p[j], &hxge_rx_rbr_desc_dma_attr,
1620 		    rx_rbr_cntl_alloc_size)) != HXGE_OK) {
1621 			break;
1622 		}
1623 
1624 		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1625 		    &dma_rcr_cntl_p[j], &hxge_rx_rcr_desc_dma_attr,
1626 		    rx_rcr_cntl_alloc_size)) != HXGE_OK) {
1627 			break;
1628 		}
1629 
1630 		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1631 		    &dma_mbox_cntl_p[j], &hxge_rx_mbox_dma_attr,
1632 		    rx_mbox_cntl_alloc_size)) != HXGE_OK) {
1633 			break;
1634 		}
1635 		st_rdc++;
1636 	}
1637 
1638 	if (j < ndmas) {
1639 		goto hxge_alloc_rx_mem_fail2;
1640 	}
1641 
1642 	dma_poolp->ndmas = ndmas;
1643 	dma_poolp->num_chunks = num_chunks;
1644 	dma_poolp->buf_allocated = B_TRUE;
1645 	hxgep->rx_buf_pool_p = dma_poolp;
1646 	dma_poolp->dma_buf_pool_p = dma_buf_p;
1647 
1648 	dma_rbr_cntl_poolp->ndmas = ndmas;
1649 	dma_rbr_cntl_poolp->buf_allocated = B_TRUE;
1650 	hxgep->rx_rbr_cntl_pool_p = dma_rbr_cntl_poolp;
1651 	dma_rbr_cntl_poolp->dma_buf_pool_p = dma_rbr_cntl_p;
1652 
1653 	dma_rcr_cntl_poolp->ndmas = ndmas;
1654 	dma_rcr_cntl_poolp->buf_allocated = B_TRUE;
1655 	hxgep->rx_rcr_cntl_pool_p = dma_rcr_cntl_poolp;
1656 	dma_rcr_cntl_poolp->dma_buf_pool_p = dma_rcr_cntl_p;
1657 
1658 	dma_mbox_cntl_poolp->ndmas = ndmas;
1659 	dma_mbox_cntl_poolp->buf_allocated = B_TRUE;
1660 	hxgep->rx_mbox_cntl_pool_p = dma_mbox_cntl_poolp;
1661 	dma_mbox_cntl_poolp->dma_buf_pool_p = dma_mbox_cntl_p;
1662 
1663 	goto hxge_alloc_rx_mem_pool_exit;
1664 
1665 hxge_alloc_rx_mem_fail2:
1666 	/* Free control buffers */
1667 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1668 	    "==> hxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
1669 	for (; j >= 0; j--) {
1670 		hxge_free_rx_cntl_dma(hxgep,
1671 		    (p_hxge_dma_common_t)dma_rbr_cntl_p[j]);
1672 		hxge_free_rx_cntl_dma(hxgep,
1673 		    (p_hxge_dma_common_t)dma_rcr_cntl_p[j]);
1674 		hxge_free_rx_cntl_dma(hxgep,
1675 		    (p_hxge_dma_common_t)dma_mbox_cntl_p[j]);
1676 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1677 		    "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1678 	}
1679 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1680 	    "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1681 
1682 hxge_alloc_rx_mem_fail1:
1683 	/* Free data buffers */
1684 	i--;
1685 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1686 	    "==> hxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
1687 	for (; i >= 0; i--) {
1688 		hxge_free_rx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
1689 		    num_chunks[i]);
1690 	}
1691 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1692 	    "==> hxge_alloc_rx_mem_pool: data bufs freed (%d)", i));
1693 
1694 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1695 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1696 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1697 	KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
1698 	KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1699 	KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
1700 	KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1701 	KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
1702 	KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1703 
1704 hxge_alloc_rx_mem_pool_exit:
1705 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1706 	    "<== hxge_alloc_rx_mem_pool:status 0x%08x", status));
1707 
1708 	return (status);
1709 }
1710 
1711 static void
1712 hxge_free_rx_mem_pool(p_hxge_t hxgep)
1713 {
1714 	uint32_t		i, ndmas;
1715 	p_hxge_dma_pool_t	dma_poolp;
1716 	p_hxge_dma_common_t	*dma_buf_p;
1717 	p_hxge_dma_pool_t	dma_rbr_cntl_poolp;
1718 	p_hxge_dma_common_t	*dma_rbr_cntl_p;
1719 	p_hxge_dma_pool_t	dma_rcr_cntl_poolp;
1720 	p_hxge_dma_common_t	*dma_rcr_cntl_p;
1721 	p_hxge_dma_pool_t	dma_mbox_cntl_poolp;
1722 	p_hxge_dma_common_t	*dma_mbox_cntl_p;
1723 	uint32_t		*num_chunks;
1724 
1725 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_free_rx_mem_pool"));
1726 
1727 	dma_poolp = hxgep->rx_buf_pool_p;
1728 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
1729 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool "
1730 		    "(null rx buf pool or buf not allocated)"));
1731 		return;
1732 	}
1733 
1734 	dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p;
1735 	if (dma_rbr_cntl_poolp == NULL ||
1736 	    (!dma_rbr_cntl_poolp->buf_allocated)) {
1737 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1738 		    "<== hxge_free_rx_mem_pool "
1739 		    "(null rbr cntl buf pool or rbr cntl buf not allocated)"));
1740 		return;
1741 	}
1742 
1743 	dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p;
1744 	if (dma_rcr_cntl_poolp == NULL ||
1745 	    (!dma_rcr_cntl_poolp->buf_allocated)) {
1746 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1747 		    "<== hxge_free_rx_mem_pool "
1748 		    "(null rcr cntl buf pool or rcr cntl buf not allocated)"));
1749 		return;
1750 	}
1751 
1752 	dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p;
1753 	if (dma_mbox_cntl_poolp == NULL ||
1754 	    (!dma_mbox_cntl_poolp->buf_allocated)) {
1755 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1756 		    "<== hxge_free_rx_mem_pool "
1757 		    "(null mbox cntl buf pool or mbox cntl buf not allocated)"));
1758 		return;
1759 	}
1760 
1761 	dma_buf_p = dma_poolp->dma_buf_pool_p;
1762 	num_chunks = dma_poolp->num_chunks;
1763 
1764 	dma_rbr_cntl_p = dma_rbr_cntl_poolp->dma_buf_pool_p;
1765 	dma_rcr_cntl_p = dma_rcr_cntl_poolp->dma_buf_pool_p;
1766 	dma_mbox_cntl_p = dma_mbox_cntl_poolp->dma_buf_pool_p;
1767 	ndmas = dma_rbr_cntl_poolp->ndmas;
1768 
1769 	for (i = 0; i < ndmas; i++) {
1770 		hxge_free_rx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
1771 	}
1772 
1773 	for (i = 0; i < ndmas; i++) {
1774 		hxge_free_rx_cntl_dma(hxgep, dma_rbr_cntl_p[i]);
1775 		hxge_free_rx_cntl_dma(hxgep, dma_rcr_cntl_p[i]);
1776 		hxge_free_rx_cntl_dma(hxgep, dma_mbox_cntl_p[i]);
1777 	}
1778 
1779 	for (i = 0; i < ndmas; i++) {
1780 		KMEM_FREE(dma_buf_p[i],
1781 		    sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1782 		KMEM_FREE(dma_rbr_cntl_p[i], sizeof (hxge_dma_common_t));
1783 		KMEM_FREE(dma_rcr_cntl_p[i], sizeof (hxge_dma_common_t));
1784 		KMEM_FREE(dma_mbox_cntl_p[i], sizeof (hxge_dma_common_t));
1785 	}
1786 
1787 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1788 	KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1789 	KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
1790 	KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1791 	KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
1792 	KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1793 	KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
1794 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1795 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1796 
1797 	hxgep->rx_buf_pool_p = NULL;
1798 	hxgep->rx_rbr_cntl_pool_p = NULL;
1799 	hxgep->rx_rcr_cntl_pool_p = NULL;
1800 	hxgep->rx_mbox_cntl_pool_p = NULL;
1801 
1802 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool"));
1803 }
1804 
1805 static hxge_status_t
1806 hxge_alloc_rx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
1807     p_hxge_dma_common_t *dmap,
1808     size_t alloc_size, size_t block_size, uint32_t *num_chunks)
1809 {
1810 	p_hxge_dma_common_t	rx_dmap;
1811 	hxge_status_t		status = HXGE_OK;
1812 	size_t			total_alloc_size;
1813 	size_t			allocated = 0;
1814 	int			i, size_index, array_size;
1815 
1816 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_buf_dma"));
1817 
1818 	rx_dmap = (p_hxge_dma_common_t)
1819 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
1820 
1821 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1822 	    " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
1823 	    dma_channel, alloc_size, block_size, dmap));
1824 
1825 	total_alloc_size = alloc_size;
1826 
1827 	i = 0;
1828 	size_index = 0;
1829 	array_size = sizeof (alloc_sizes) / sizeof (size_t);
1830 	while ((size_index < array_size) &&
1831 	    (alloc_sizes[size_index] < alloc_size))
1832 		size_index++;
1833 	if (size_index >= array_size) {
1834 		size_index = array_size - 1;
1835 	}
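	/*
	 * Chunked allocation with fallback: start at the smallest entry of
	 * alloc_sizes[] that covers the request (or the largest entry if
	 * none does).  Each time hxge_dma_mem_alloc() fails, step down to
	 * the next smaller chunk size and retry, accumulating chunks until
	 * the request is satisfied or HXGE_DMA_BLOCK chunks are in use.
	 * For example (hypothetical sizes), a 1 MB request against
	 * alloc_sizes[] = { 256 KB, 512 KB, 1 MB } would first try one
	 * 1 MB chunk and, if that fails, fall back to two 512 KB chunks.
	 */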
1836 
1837 	while ((allocated < total_alloc_size) &&
1838 	    (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
1839 		rx_dmap[i].dma_chunk_index = i;
1840 		rx_dmap[i].block_size = block_size;
1841 		rx_dmap[i].alength = alloc_sizes[size_index];
1842 		rx_dmap[i].orig_alength = rx_dmap[i].alength;
1843 		rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
1844 		rx_dmap[i].dma_channel = dma_channel;
1845 		rx_dmap[i].contig_alloc_type = B_FALSE;
1846 
1847 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1848 		    "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
1849 		    "i %d nblocks %d alength %d",
1850 		    dma_channel, i, &rx_dmap[i], block_size,
1851 		    i, rx_dmap[i].nblocks, rx_dmap[i].alength));
1852 		status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1853 		    &hxge_rx_dma_attr, rx_dmap[i].alength,
1854 		    &hxge_dev_buf_dma_acc_attr,
1855 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1856 		    (p_hxge_dma_common_t)(&rx_dmap[i]));
1857 		if (status != HXGE_OK) {
1858 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1859 			    " hxge_alloc_rx_buf_dma: Alloc Failed: "
1860 			    " for size: %d", alloc_sizes[size_index]));
1861 			size_index--;
1862 		} else {
1863 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1864 			    " alloc_rx_buf_dma allocated rdc %d "
1865 			    "chunk %d size %x dvma %x bufp %llx ",
1866 			    dma_channel, i, rx_dmap[i].alength,
1867 			    rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
1868 			i++;
1869 			allocated += alloc_sizes[size_index];
1870 		}
1871 	}
1872 
1873 	if (allocated < total_alloc_size) {
1874 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1875 		    " hxge_alloc_rx_buf_dma failed due to"
1876 		    " allocated(%d) < required(%d)",
1877 		    allocated, total_alloc_size));
1878 		goto hxge_alloc_rx_mem_fail1;
1879 	}
1880 
1881 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1882 	    " alloc_rx_buf_dma rdc %d allocated %d chunks", dma_channel, i));
1883 
1884 	*num_chunks = i;
1885 	*dmap = rx_dmap;
1886 
1887 	goto hxge_alloc_rx_mem_exit;
1888 
1889 hxge_alloc_rx_mem_fail1:
1890 	KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1891 
1892 hxge_alloc_rx_mem_exit:
1893 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1894 	    "<== hxge_alloc_rx_buf_dma status 0x%08x", status));
1895 
1896 	return (status);
1897 }
1898 
1899 /*ARGSUSED*/
1900 static void
1901 hxge_free_rx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
1902     uint32_t num_chunks)
1903 {
1904 	int i;
1905 
1906 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1907 	    "==> hxge_free_rx_buf_dma: # of chunks %d", num_chunks));
1908 
1909 	for (i = 0; i < num_chunks; i++) {
1910 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1911 		    "==> hxge_free_rx_buf_dma: chunk %d dmap 0x%llx", i, dmap));
1912 		hxge_dma_mem_free(dmap++);
1913 	}
1914 
1915 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_buf_dma"));
1916 }
1917 
1918 /*ARGSUSED*/
1919 static hxge_status_t
1920 hxge_alloc_rx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
1921     p_hxge_dma_common_t *dmap, struct ddi_dma_attr *attr, size_t size)
1922 {
1923 	p_hxge_dma_common_t	rx_dmap;
1924 	hxge_status_t		status = HXGE_OK;
1925 
1926 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_cntl_dma"));
1927 
1928 	rx_dmap = (p_hxge_dma_common_t)
1929 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t), KM_SLEEP);
1930 
1931 	rx_dmap->contig_alloc_type = B_FALSE;
1932 
1933 	status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1934 	    attr, size, &hxge_dev_desc_dma_acc_attr,
1935 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, rx_dmap);
1936 	if (status != HXGE_OK) {
1937 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1938 		    " hxge_alloc_rx_cntl_dma: Alloc Failed: "
1939 		    " for size: %d", size));
1940 		goto hxge_alloc_rx_cntl_dma_fail1;
1941 	}
1942 
1943 	*dmap = rx_dmap;
1944 
1945 	goto hxge_alloc_rx_cntl_dma_exit;
1946 
1947 hxge_alloc_rx_cntl_dma_fail1:
1948 	KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t));
1949 
1950 hxge_alloc_rx_cntl_dma_exit:
1951 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1952 	    "<== hxge_alloc_rx_cntl_dma status 0x%08x", status));
1953 
1954 	return (status);
1955 }
1956 
1957 /*ARGSUSED*/
1958 static void
1959 hxge_free_rx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
1960 {
1961 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_rx_cntl_dma"));
1962 
1963 	hxge_dma_mem_free(dmap);
1964 
1965 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_rx_cntl_dma"));
1966 }
1967 
1968 static hxge_status_t
1969 hxge_alloc_tx_mem_pool(p_hxge_t hxgep)
1970 {
1971 	hxge_status_t		status = HXGE_OK;
1972 	int			i, j;
1973 	uint32_t		ndmas, st_tdc;
1974 	p_hxge_dma_pt_cfg_t	p_all_cfgp;
1975 	p_hxge_hw_pt_cfg_t	p_cfgp;
1976 	p_hxge_dma_pool_t	dma_poolp;
1977 	p_hxge_dma_common_t	*dma_buf_p;
1978 	p_hxge_dma_pool_t	dma_cntl_poolp;
1979 	p_hxge_dma_common_t	*dma_cntl_p;
1980 	size_t			tx_buf_alloc_size;
1981 	size_t			tx_cntl_alloc_size;
1982 	uint32_t		*num_chunks;	/* per dma */
1983 
1984 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool"));
1985 
1986 	p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1987 	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1988 	st_tdc = p_cfgp->start_tdc;
1989 	ndmas = p_cfgp->max_tdcs;
1990 
1991 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool: "
1992 	    "p_cfgp 0x%016llx start_tdc %d ndmas %d hxgep->max_tdcs %d",
1993 	    p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, hxgep->max_tdcs));
1994 	/*
1995 	 * Allocate memory for each transmit DMA channel.
1996 	 */
1997 	dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1998 	    KM_SLEEP);
1999 	dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
2000 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
2001 
2002 	dma_cntl_poolp = (p_hxge_dma_pool_t)
2003 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
2004 	dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
2005 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
2006 
2007 	hxgep->hxge_port_tx_ring_size = hxge_tx_ring_size;
2008 
2009 	/*
2010 	 * Assume that each DMA channel will be configured with the default
2011 	 * transmit buffer size for copying transmit data.  (Packets with
2012 	 * payloads larger than this limit are not copied.)
2013 	 */
2014 	tx_buf_alloc_size = (hxge_bcopy_thresh * hxge_tx_ring_size);
2015 
2016 	/*
2017 	 * Addresses of transmit descriptor ring and the mailbox must be all
2018 	 * cache-aligned (64 bytes).
2019 	 */
2020 	tx_cntl_alloc_size = hxge_tx_ring_size;
2021 	tx_cntl_alloc_size *= (sizeof (tx_desc_t));
2022 	tx_cntl_alloc_size += sizeof (txdma_mailbox_t);
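	/*
	 * For example, a hypothetical 1024-entry ring would need
	 * 1024 * sizeof (tx_desc_t) bytes of descriptor space plus one
	 * txdma_mailbox_t, all carved from a single DMA allocation.
	 */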
2023 
2024 	num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
2025 	    KM_SLEEP);
2026 
2027 	/*
2028 	 * Allocate memory for transmit buffers and descriptor rings. Replace
2029 	 * allocation functions with interface functions provided by the
2030 	 * partition manager when it is available.
2031 	 *
2032 	 * Allocate memory for the transmit buffer pool.
2033 	 */
2034 	for (i = 0; i < ndmas; i++) {
2035 		num_chunks[i] = 0;
2036 		status = hxge_alloc_tx_buf_dma(hxgep, st_tdc, &dma_buf_p[i],
2037 		    tx_buf_alloc_size, hxge_bcopy_thresh, &num_chunks[i]);
2038 		if (status != HXGE_OK) {
2039 			break;
2040 		}
2041 		st_tdc++;
2042 	}
2043 
2044 	if (i < ndmas) {
2045 		goto hxge_alloc_tx_mem_pool_fail1;
2046 	}
2047 
2048 	st_tdc = p_cfgp->start_tdc;
2049 
2050 	/*
2051 	 * Allocate memory for descriptor rings and mailbox.
2052 	 */
2053 	for (j = 0; j < ndmas; j++) {
2054 		status = hxge_alloc_tx_cntl_dma(hxgep, st_tdc, &dma_cntl_p[j],
2055 		    tx_cntl_alloc_size);
2056 		if (status != HXGE_OK) {
2057 			break;
2058 		}
2059 		st_tdc++;
2060 	}
2061 
2062 	if (j < ndmas) {
2063 		goto hxge_alloc_tx_mem_pool_fail2;
2064 	}
2065 
2066 	dma_poolp->ndmas = ndmas;
2067 	dma_poolp->num_chunks = num_chunks;
2068 	dma_poolp->buf_allocated = B_TRUE;
2069 	dma_poolp->dma_buf_pool_p = dma_buf_p;
2070 	hxgep->tx_buf_pool_p = dma_poolp;
2071 
2072 	dma_cntl_poolp->ndmas = ndmas;
2073 	dma_cntl_poolp->buf_allocated = B_TRUE;
2074 	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2075 	hxgep->tx_cntl_pool_p = dma_cntl_poolp;
2076 
2077 	HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2078 	    "==> hxge_alloc_tx_mem_pool: start_tdc %d "
2079 	    "ndmas %d poolp->ndmas %d",
	    p_cfgp->start_tdc, ndmas, dma_poolp->ndmas));
2080 
2081 	goto hxge_alloc_tx_mem_pool_exit;
2082 
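/*
 * Error unwinding relies on label fall-through: a failure in the
 * control-area loop enters at _fail2, frees the control DMA areas
 * allocated so far, then falls into _fail1 to free the data buffers
 * before the bookkeeping structures are released.
 */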
2083 hxge_alloc_tx_mem_pool_fail2:
2084 	/* Free control buffers */
2085 	j--;
2086 	for (; j >= 0; j--) {
2087 		hxge_free_tx_cntl_dma(hxgep,
2088 		    (p_hxge_dma_common_t)dma_cntl_p[j]);
2089 	}
2090 
2091 hxge_alloc_tx_mem_pool_fail1:
2092 	/* Free data buffers */
2093 	i--;
2094 	for (; i >= 0; i--) {
2095 		hxge_free_tx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
2096 		    num_chunks[i]);
2097 	}
2098 
2099 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2100 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2101 	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2102 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2103 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2104 
2105 hxge_alloc_tx_mem_pool_exit:
2106 	HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2107 	    "<== hxge_alloc_tx_mem_pool:status 0x%08x", status));
2108 
2109 	return (status);
2110 }
2111 
2112 static hxge_status_t
2113 hxge_alloc_tx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
2114     p_hxge_dma_common_t *dmap, size_t alloc_size,
2115     size_t block_size, uint32_t *num_chunks)
2116 {
2117 	p_hxge_dma_common_t	tx_dmap;
2118 	hxge_status_t		status = HXGE_OK;
2119 	size_t			total_alloc_size;
2120 	size_t			allocated = 0;
2121 	int			i, size_index, array_size;
2122 
2123 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_buf_dma"));
2124 
2125 	tx_dmap = (p_hxge_dma_common_t)
2126 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
2127 
2128 	total_alloc_size = alloc_size;
2129 	i = 0;
2130 	size_index = 0;
2131 	array_size = sizeof (alloc_sizes) / sizeof (size_t);
2132 	while ((size_index < array_size) &&
2133 	    (alloc_sizes[size_index] < alloc_size))
2134 		size_index++;
2135 	if (size_index >= array_size) {
2136 		size_index = array_size - 1;
2137 	}
2138 
2139 	while ((allocated < total_alloc_size) &&
2140 	    (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
2141 		tx_dmap[i].dma_chunk_index = i;
2142 		tx_dmap[i].block_size = block_size;
2143 		tx_dmap[i].alength = alloc_sizes[size_index];
2144 		tx_dmap[i].orig_alength = tx_dmap[i].alength;
2145 		tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2146 		tx_dmap[i].dma_channel = dma_channel;
2147 		tx_dmap[i].contig_alloc_type = B_FALSE;
2148 
2149 		status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2150 		    &hxge_tx_dma_attr, tx_dmap[i].alength,
2151 		    &hxge_dev_buf_dma_acc_attr,
2152 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
2153 		    (p_hxge_dma_common_t)(&tx_dmap[i]));
2154 		if (status != HXGE_OK) {
2155 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2156 			    " hxge_alloc_tx_buf_dma: Alloc Failed: "
2157 			    " for size: %d", alloc_sizes[size_index]));
2158 			size_index--;
2159 		} else {
2160 			i++;
2161 			allocated += alloc_sizes[size_index];
2162 		}
2163 	}
2164 
2165 	if (allocated < total_alloc_size) {
2166 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2167 		    " hxge_alloc_tx_buf_dma: failed due to"
2168 		    " allocated(%d) < required(%d)",
2169 		    allocated, total_alloc_size));
2170 		goto hxge_alloc_tx_mem_fail1;
2171 	}
2172 
2173 	*num_chunks = i;
2174 	*dmap = tx_dmap;
2175 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2176 	    "==> hxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
2177 	    *dmap, i));
2178 	goto hxge_alloc_tx_mem_exit;
2179 
2180 hxge_alloc_tx_mem_fail1:
2181 	KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2182 
2183 hxge_alloc_tx_mem_exit:
2184 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2185 	    "<== hxge_alloc_tx_buf_dma status 0x%08x", status));
2186 
2187 	return (status);
2188 }
2189 
2190 /*ARGSUSED*/
2191 static void
2192 hxge_free_tx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
2193     uint32_t num_chunks)
2194 {
2195 	int i;
2196 
2197 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_tx_buf_dma"));
2198 
2199 	for (i = 0; i < num_chunks; i++) {
2200 		hxge_dma_mem_free(dmap++);
2201 	}
2202 
2203 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_tx_buf_dma"));
2204 }
2205 
2206 /*ARGSUSED*/
2207 static hxge_status_t
2208 hxge_alloc_tx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
2209     p_hxge_dma_common_t *dmap, size_t size)
2210 {
2211 	p_hxge_dma_common_t	tx_dmap;
2212 	hxge_status_t		status = HXGE_OK;
2213 
2214 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_cntl_dma"));
2215 
2216 	tx_dmap = (p_hxge_dma_common_t)KMEM_ZALLOC(sizeof (hxge_dma_common_t),
2217 	    KM_SLEEP);
2218 
2219 	tx_dmap->contig_alloc_type = B_FALSE;
2220 
2221 	status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2222 	    &hxge_tx_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr,
2223 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, tx_dmap);
2224 	if (status != HXGE_OK) {
2225 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2226 		    " hxge_alloc_tx_cntl_dma: Alloc Failed: "
2227 		    " for size: %d", size));
2228 		goto hxge_alloc_tx_cntl_dma_fail1;
2229 	}
2230 
2231 	*dmap = tx_dmap;
2232 
2233 	goto hxge_alloc_tx_cntl_dma_exit;
2234 
2235 hxge_alloc_tx_cntl_dma_fail1:
2236 	KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t));
2237 
2238 hxge_alloc_tx_cntl_dma_exit:
2239 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2240 	    "<== hxge_alloc_tx_cntl_dma status 0x%08x", status));
2241 
2242 	return (status);
2243 }
2244 
2245 /*ARGSUSED*/
2246 static void
2247 hxge_free_tx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
2248 {
2249 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_tx_cntl_dma"));
2250 
2251 	hxge_dma_mem_free(dmap);
2252 
2253 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_tx_cntl_dma"));
2254 }
2255 
2256 static void
2257 hxge_free_tx_mem_pool(p_hxge_t hxgep)
2258 {
2259 	uint32_t		i, ndmas;
2260 	p_hxge_dma_pool_t	dma_poolp;
2261 	p_hxge_dma_common_t	*dma_buf_p;
2262 	p_hxge_dma_pool_t	dma_cntl_poolp;
2263 	p_hxge_dma_common_t	*dma_cntl_p;
2264 	uint32_t		*num_chunks;
2265 
2266 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_free_tx_mem_pool"));
2267 
2268 	dma_poolp = hxgep->tx_buf_pool_p;
2269 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
2270 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2271 		    "<== hxge_free_tx_mem_pool "
2272 		    "(null tx buf pool or buf not allocated)"));
2273 		return;
2274 	}
2275 
2276 	dma_cntl_poolp = hxgep->tx_cntl_pool_p;
2277 	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
2278 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2279 		    "<== hxge_free_tx_mem_pool "
2280 		    "(null tx cntl buf pool or cntl buf not allocated)"));
2281 		return;
2282 	}
2283 
2284 	dma_buf_p = dma_poolp->dma_buf_pool_p;
2285 	num_chunks = dma_poolp->num_chunks;
2286 
2287 	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
2288 	ndmas = dma_cntl_poolp->ndmas;
2289 
2290 	for (i = 0; i < ndmas; i++) {
2291 		hxge_free_tx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
2292 	}
2293 
2294 	for (i = 0; i < ndmas; i++) {
2295 		hxge_free_tx_cntl_dma(hxgep, dma_cntl_p[i]);
2296 	}
2297 
2298 	for (i = 0; i < ndmas; i++) {
2299 		KMEM_FREE(dma_buf_p[i],
2300 		    sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2301 		KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t));
2302 	}
2303 
2304 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2305 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2306 	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2307 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2308 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2309 
2310 	hxgep->tx_buf_pool_p = NULL;
2311 	hxgep->tx_cntl_pool_p = NULL;
2312 
2313 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_free_tx_mem_pool"));
2314 }
2315 
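/*
 * hxge_dma_mem_alloc() -- the standard three-step DDI DMA setup:
 * ddi_dma_alloc_handle(), ddi_dma_mem_alloc() and
 * ddi_dma_addr_bind_handle(), with DDI_DMA_DONTWAIT so the caller is
 * never blocked.  The binding must resolve to exactly one cookie
 * (a single physically contiguous window); anything else is unwound
 * and treated as an error, since the hardware is programmed with one
 * base I/O address per buffer.
 */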
2316 /*ARGSUSED*/
2317 static hxge_status_t
2318 hxge_dma_mem_alloc(p_hxge_t hxgep, dma_method_t method,
2319     struct ddi_dma_attr *dma_attrp,
2320     size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
2321     p_hxge_dma_common_t dma_p)
2322 {
2323 	caddr_t		kaddrp;
2324 	int		ddi_status = DDI_SUCCESS;
2325 
2326 	dma_p->dma_handle = NULL;
2327 	dma_p->acc_handle = NULL;
2328 	dma_p->kaddrp = NULL;
2329 
2330 	ddi_status = ddi_dma_alloc_handle(hxgep->dip, dma_attrp,
2331 	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
2332 	if (ddi_status != DDI_SUCCESS) {
2333 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2334 		    "hxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
2335 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2336 	}
2337 
2338 	ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, acc_attr_p,
2339 	    xfer_flags, DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
2340 	    &dma_p->acc_handle);
2341 	if (ddi_status != DDI_SUCCESS) {
2342 		/* The caller will decide whether it is fatal */
2343 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2344 		    "hxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
2345 		ddi_dma_free_handle(&dma_p->dma_handle);
2346 		dma_p->dma_handle = NULL;
2347 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2348 	}
2349 
2350 	if (dma_p->alength < length) {
2351 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2352 		    "hxge_dma_mem_alloc:ddi_dma_mem_alloc < length."));
2353 		ddi_dma_mem_free(&dma_p->acc_handle);
2354 		ddi_dma_free_handle(&dma_p->dma_handle);
2355 		dma_p->acc_handle = NULL;
2356 		dma_p->dma_handle = NULL;
2357 		return (HXGE_ERROR);
2358 	}
2359 
2360 	ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2361 	    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2362 	    &dma_p->dma_cookie, &dma_p->ncookies);
2363 	if (ddi_status != DDI_DMA_MAPPED) {
2364 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2365 		    "hxge_dma_mem_alloc:ddi_dma_addr_bind failed "
2366 		    "(status 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2367 		if (dma_p->acc_handle) {
2368 			ddi_dma_mem_free(&dma_p->acc_handle);
2369 			dma_p->acc_handle = NULL;
2370 		}
2371 		ddi_dma_free_handle(&dma_p->dma_handle);
2372 		dma_p->dma_handle = NULL;
2373 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2374 	}
2375 
2376 	if (dma_p->ncookies != 1) {
2377 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2378 		    "hxge_dma_mem_alloc:ddi_dma_addr_bind > 1 cookie "
2379 		    "(status 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2380 		if (dma_p->acc_handle) {
2381 			ddi_dma_mem_free(&dma_p->acc_handle);
2382 			dma_p->acc_handle = NULL;
2383 		}
2384 		(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2385 		ddi_dma_free_handle(&dma_p->dma_handle);
2386 		dma_p->dma_handle = NULL;
2387 		return (HXGE_ERROR);
2388 	}
2389 
2390 	dma_p->kaddrp = kaddrp;
2391 #if defined(__i386)
2392 	dma_p->ioaddr_pp =
2393 	    (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
2394 #else
2395 	dma_p->ioaddr_pp = (unsigned char *) dma_p->dma_cookie.dmac_laddress;
2396 #endif
2397 
2398 	HPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
2399 
2400 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_dma_mem_alloc: "
2401 	    "dma buffer allocated: dma_p $%p "
2402 	    "return dmac_laddress from cookie $%p dmac_size %d "
2403 	    "dma_p->ioaddr_p $%p "
2404 	    "dma_p->orig_ioaddr_p $%p "
2405 	    "orig_vatopa $%p "
2406 	    "alength %d (0x%x) "
2407 	    "kaddrp $%p "
2408 	    "length %d (0x%x)",
2409 	    dma_p,
2410 	    dma_p->dma_cookie.dmac_laddress,
2411 	    dma_p->dma_cookie.dmac_size,
2412 	    dma_p->ioaddr_pp,
2413 	    dma_p->orig_ioaddr_pp,
2414 	    dma_p->orig_vatopa,
2415 	    dma_p->alength, dma_p->alength,
2416 	    kaddrp,
2417 	    length, length));
2418 
2419 	return (HXGE_OK);
2420 }
2421 
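/*
 * hxge_dma_mem_free() -- undo hxge_dma_mem_alloc(): any active
 * mapping is unbound (dropping the cookie) before the handle and the
 * backing memory are released, and the saved pointers are cleared so
 * a repeated call is harmless.
 */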
2422 static void
2423 hxge_dma_mem_free(p_hxge_dma_common_t dma_p)
2424 {
2425 	if (dma_p == NULL)
2426 		return;
2427 
2428 	if (dma_p->dma_handle != NULL) {
2429 		if (dma_p->ncookies) {
2430 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2431 			dma_p->ncookies = 0;
2432 		}
2433 		ddi_dma_free_handle(&dma_p->dma_handle);
2434 		dma_p->dma_handle = NULL;
2435 	}
2436 
2437 	if (dma_p->acc_handle != NULL) {
2438 		ddi_dma_mem_free(&dma_p->acc_handle);
2439 		dma_p->acc_handle = NULL;
2440 		HPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
2441 	}
2442 
2443 	dma_p->kaddrp = NULL;
2444 	dma_p->alength = 0;
2445 }
2446 
2447 /*
2448  *	hxge_m_start() -- start transmitting and receiving.
2449  *
2450  *	This function is called by the MAC layer when the first
2451  *	stream is opened, to prepare the hardware for transmitting
2452  *	and receiving packets.
2453  */
2454 static int
2455 hxge_m_start(void *arg)
2456 {
2457 	p_hxge_t hxgep = (p_hxge_t)arg;
2458 
2459 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_start"));
2460 
2461 	MUTEX_ENTER(hxgep->genlock);
2462 
2463 	if (hxge_init(hxgep) != DDI_SUCCESS) {
2464 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2465 		    "<== hxge_m_start: initialization failed"));
2466 		MUTEX_EXIT(hxgep->genlock);
2467 		return (EIO);
2468 	}
2469 
2470 	if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) {
2471 		/*
2472 		 * Start timer to check the system error and tx hangs
2473 		 */
2474 		hxgep->hxge_timerid = hxge_start_timer(hxgep,
2475 		    hxge_check_hw_state, HXGE_CHECK_TIMER);
2476 
2477 		hxgep->hxge_mac_state = HXGE_MAC_STARTED;
2478 
2479 		hxgep->timeout.link_status = 0;
2480 		hxgep->timeout.report_link_status = B_TRUE;
2481 		hxgep->timeout.ticks = drv_usectohz(2 * 1000000);
2482 
2483 		/* Start the link status timer to check the link status */
2484 		MUTEX_ENTER(&hxgep->timeout.lock);
2485 		hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
2486 		    hxgep->timeout.ticks);
2487 		MUTEX_EXIT(&hxgep->timeout.lock);
2488 	}
2489 
2490 	MUTEX_EXIT(hxgep->genlock);
2491 
2492 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_start"));
2493 
2494 	return (0);
2495 }
2496 
2497 /*
2498  * hxge_m_stop(): stop transmitting and receiving.
2499  */
2500 static void
2501 hxge_m_stop(void *arg)
2502 {
2503 	p_hxge_t hxgep = (p_hxge_t)arg;
2504 
2505 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_stop"));
2506 
2507 	if (hxgep->hxge_timerid) {
2508 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
2509 		hxgep->hxge_timerid = 0;
2510 	}
2511 
2512 	/* Stop the link status timer before unregistering */
2513 	MUTEX_ENTER(&hxgep->timeout.lock);
2514 	if (hxgep->timeout.id) {
2515 		(void) untimeout(hxgep->timeout.id);
2516 		hxgep->timeout.id = 0;
2517 	}
2518 	hxge_link_update(hxgep, LINK_STATE_DOWN);
2519 	MUTEX_EXIT(&hxgep->timeout.lock);
2520 
2521 	MUTEX_ENTER(hxgep->genlock);
2522 
2523 	hxge_uninit(hxgep);
2524 
2525 	hxgep->hxge_mac_state = HXGE_MAC_STOPPED;
2526 
2527 	MUTEX_EXIT(hxgep->genlock);
2528 
2529 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_stop"));
2530 }
2531 
2532 static int
2533 hxge_m_unicst(void *arg, const uint8_t *macaddr)
2534 {
2535 	p_hxge_t		hxgep = (p_hxge_t)arg;
2536 	struct ether_addr	addrp;
2537 	hxge_status_t		status;
2538 
2539 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_unicst"));
2540 
2541 	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
2542 
2543 	status = hxge_set_mac_addr(hxgep, &addrp);
2544 	if (status != HXGE_OK) {
2545 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2546 		    "<== hxge_m_unicst: set unicast failed"));
2547 		return (EINVAL);
2548 	}
2549 
2550 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_unicst"));
2551 
2552 	return (0);
2553 }
2554 
2555 static int
2556 hxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
2557 {
2558 	p_hxge_t		hxgep = (p_hxge_t)arg;
2559 	struct ether_addr	addrp;
2560 
2561 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_multicst: add %d", add));
2562 
2563 	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
2564 
2565 	if (add) {
2566 		if (hxge_add_mcast_addr(hxgep, &addrp)) {
2567 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2568 			    "<== hxge_m_multicst: add multicast failed"));
2569 			return (EINVAL);
2570 		}
2571 	} else {
2572 		if (hxge_del_mcast_addr(hxgep, &addrp)) {
2573 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2574 			    "<== hxge_m_multicst: del multicast failed"));
2575 			return (EINVAL);
2576 		}
2577 	}
2578 
2579 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_multicst"));
2580 
2581 	return (0);
2582 }
2583 
2584 static int
2585 hxge_m_promisc(void *arg, boolean_t on)
2586 {
2587 	p_hxge_t hxgep = (p_hxge_t)arg;
2588 
2589 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_promisc: on %d", on));
2590 
2591 	if (hxge_set_promisc(hxgep, on)) {
2592 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2593 		    "<== hxge_m_promisc: set promisc failed"));
2594 		return (EINVAL);
2595 	}
2596 
2597 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_promisc: on %d", on));
2598 
2599 	return (0);
2600 }
2601 
2602 static void
2603 hxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2604 {
2605 	p_hxge_t	hxgep = (p_hxge_t)arg;
2606 	struct iocblk	*iocp;
2607 	boolean_t	need_privilege;
2608 	int		err;
2609 	int		cmd;
2610 
2611 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl"));
2612 
2613 	iocp = (struct iocblk *)mp->b_rptr;
2614 	iocp->ioc_error = 0;
2615 	need_privilege = B_TRUE;
2616 	cmd = iocp->ioc_cmd;
2617 
2618 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl: cmd 0x%08x", cmd));
2619 	switch (cmd) {
2620 	default:
2621 		miocnak(wq, mp, 0, EINVAL);
2622 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl: invalid"));
2623 		return;
2624 
2625 	case LB_GET_INFO_SIZE:
2626 	case LB_GET_INFO:
2627 	case LB_GET_MODE:
2628 		need_privilege = B_FALSE;
2629 		break;
2630 
2631 	case LB_SET_MODE:
2632 		break;
2633 
2634 	case ND_GET:
2635 		need_privilege = B_FALSE;
2636 		break;
2637 	case ND_SET:
2638 		break;
2639 
2640 	case HXGE_GET64:
2641 	case HXGE_PUT64:
2642 	case HXGE_GET_TX_RING_SZ:
2643 	case HXGE_GET_TX_DESC:
2644 	case HXGE_TX_SIDE_RESET:
2645 	case HXGE_RX_SIDE_RESET:
2646 	case HXGE_GLOBAL_RESET:
2647 	case HXGE_RESET_MAC:
2648 	case HXGE_PUT_TCAM:
2649 	case HXGE_GET_TCAM:
2650 	case HXGE_RTRACE:
2651 
2652 		need_privilege = B_FALSE;
2653 		break;
2654 	}
2655 
2656 	if (need_privilege) {
2657 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
2658 		if (err != 0) {
2659 			miocnak(wq, mp, 0, err);
2660 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2661 			    "<== hxge_m_ioctl: no priv"));
2662 			return;
2663 		}
2664 	}
2665 
2666 	switch (cmd) {
2667 	case ND_GET:
2668 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_GET command"));
2669 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_GET command"));
		/* FALLTHROUGH */
2670 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_SET command"));
2671 		hxge_param_ioctl(hxgep, wq, mp, iocp);
2672 		break;
2673 
2674 	case LB_GET_MODE:
2675 	case LB_SET_MODE:
2676 	case LB_GET_INFO_SIZE:
2677 	case LB_GET_INFO:
2678 		hxge_loopback_ioctl(hxgep, wq, mp, iocp);
2679 		break;
2680 
2681 	case HXGE_PUT_TCAM:
2682 	case HXGE_GET_TCAM:
2683 	case HXGE_GET64:
2684 	case HXGE_PUT64:
2685 	case HXGE_GET_TX_RING_SZ:
2686 	case HXGE_GET_TX_DESC:
2687 	case HXGE_TX_SIDE_RESET:
2688 	case HXGE_RX_SIDE_RESET:
2689 	case HXGE_GLOBAL_RESET:
2690 	case HXGE_RESET_MAC:
2691 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL,
2692 		    "==> hxge_m_ioctl: cmd 0x%x", cmd));
2693 		hxge_hw_ioctl(hxgep, wq, mp, iocp);
2694 		break;
2695 	}
2696 
2697 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl"));
2698 }
2699 
2700 extern void hxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);
2701 
2702 static void
2703 hxge_m_resources(void *arg)
2704 {
2705 	p_hxge_t hxgep = arg;
2706 	mac_rx_fifo_t mrf;
2707 	p_rx_rcr_rings_t rcr_rings;
2708 	p_rx_rcr_ring_t *rcr_p;
2709 	p_rx_rcr_ring_t rcrp;
2710 	uint32_t i, ndmas;
2711 	int status;
2712 
2713 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_m_resources"));
2714 
2715 	MUTEX_ENTER(hxgep->genlock);
2716 
2717 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2718 		status = hxge_init(hxgep);
2719 		if (status != HXGE_OK) {
2720 			HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_m_resources: "
2721 			    "hxge_init failed"));
2722 			MUTEX_EXIT(hxgep->genlock);
2723 			return;
2724 		}
2725 	}
2726 
2727 	mrf.mrf_type = MAC_RX_FIFO;
2728 	mrf.mrf_blank = hxge_rx_hw_blank;
2729 	mrf.mrf_arg = (void *)hxgep;
2730 
2731 	mrf.mrf_normal_blank_time = RXDMA_RCR_TO_DEFAULT;
2732 	mrf.mrf_normal_pkt_count = RXDMA_RCR_PTHRES_DEFAULT;
2733 
2734 	rcr_rings = hxgep->rx_rcr_rings;
2735 	rcr_p = rcr_rings->rcr_rings;
2736 	ndmas = rcr_rings->ndmas;
2737 
2738 	/*
2739 	 * Export our receive resources to the MAC layer.
2740 	 */
2741 	for (i = 0; i < ndmas; i++) {
2742 		rcrp = rcr_p[i];
2743 		rcrp->rcr_mac_handle =
2744 		    mac_resource_add(hxgep->mach, (mac_resource_t *)&mrf);
2745 
2746 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
2747 		    "==> hxge_m_resources: vdma %d dma %d "
2748 		    "rcrptr 0x%016llx mac_handle 0x%016llx",
2749 		    i, rcrp->rdc, rcr_p[i], rcrp->rcr_mac_handle));
2750 	}
2751 
2752 	MUTEX_EXIT(hxgep->genlock);
2753 
2754 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_m_resources"));
2755 }
2756 
2757 /*
2758  * Set an alternate MAC address
2759  */
2760 static int
2761 hxge_altmac_set(p_hxge_t hxgep, uint8_t *maddr, mac_addr_slot_t slot)
2762 {
2763 	uint64_t	address;
2764 	uint64_t	tmp;
2765 	hpi_status_t	status;
2766 	uint8_t		addrn;
2767 	int		i;
2768 
2769 	/*
2770 	 * Convert a byte array to a 48-bit value.
2771 	 * Check endianness if in doubt.
2772 	 */
2773 	address = 0;
2774 	for (i = 0; i < ETHERADDRL; i++) {
2775 		tmp = maddr[i];
2776 		address <<= 8;
2777 		address |= tmp;
2778 	}
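	/*
	 * For example, the address 00:14:4f:01:02:03 assembles to the
	 * 48-bit value 0x00144f010203; maddr[0] ends up in the most
	 * significant byte.
	 */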
2779 
2780 	addrn = (uint8_t)slot;
2781 	status = hpi_pfc_set_mac_address(hxgep->hpi_handle, addrn, address);
2782 	if (status != HPI_SUCCESS)
2783 		return (EIO);
2784 
2785 	return (0);
2786 }
2787 
2788 static void
2789 hxge_mmac_kstat_update(p_hxge_t hxgep, mac_addr_slot_t slot)
2790 {
2791 	p_hxge_mmac_stats_t	mmac_stats;
2792 	int			i;
2793 	hxge_mmac_t		*mmac_info;
2794 
2795 	mmac_info = &hxgep->hxge_mmac_info;
2796 	mmac_stats = &hxgep->statsp->mmac_stats;
2797 	mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
2798 	mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;
2799 
2800 	for (i = 0; i < ETHERADDRL; i++) {
2801 		mmac_stats->mmac_avail_pool[slot].ether_addr_octet[i] =
2802 		    mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i];
2803 	}
2804 }
2805 
2806 /*
2807  * Find an unused address slot, set the address value to the one specified,
2808  * enable the port to start filtering on the new MAC address.
2809  * Returns: 0 on success.
2810  */
2811 int
2812 hxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr)
2813 {
2814 	p_hxge_t	hxgep = arg;
2815 	mac_addr_slot_t	slot;
2816 	hxge_mmac_t	*mmac_info;
2817 	int		err;
2818 	hxge_status_t	status;
2819 
2820 	mutex_enter(hxgep->genlock);
2821 
2822 	/*
2823 	 * Make sure that hxge is initialized even if _start()
2824 	 * has not been called yet.
2825 	 */
2826 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2827 		status = hxge_init(hxgep);
2828 		if (status != HXGE_OK) {
2829 			mutex_exit(hxgep->genlock);
2830 			return (ENXIO);
2831 		}
2832 	}
2833 
2834 	mmac_info = &hxgep->hxge_mmac_info;
2835 	if (mmac_info->naddrfree == 0) {
2836 		mutex_exit(hxgep->genlock);
2837 		return (ENOSPC);
2838 	}
2839 
2840 	if (!mac_unicst_verify(hxgep->mach, maddr->mma_addr,
2841 	    maddr->mma_addrlen)) {
2842 		mutex_exit(hxgep->genlock);
2843 		return (EINVAL);
2844 	}
2845 
2846 	/*
2847 	 * Search for the first available slot. Because naddrfree
2848 	 * is not zero, we are guaranteed to find one.
2849 	 * Slot 0 is for unique (primary) MAC.  The first alternate
2850 	 * MAC slot is slot 1.
2851 	 */
2852 	for (slot = 1; slot < mmac_info->num_mmac; slot++) {
2853 		if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
2854 			break;
2855 	}
2856 
2857 	ASSERT(slot < mmac_info->num_mmac);
2858 	if ((err = hxge_altmac_set(hxgep, maddr->mma_addr, slot)) != 0) {
2859 		mutex_exit(hxgep->genlock);
2860 		return (err);
2861 	}
2862 	bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
2863 	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
2864 	mmac_info->naddrfree--;
2865 	hxge_mmac_kstat_update(hxgep, slot);
2866 
2867 	maddr->mma_slot = slot;
2868 
2869 	mutex_exit(hxgep->genlock);
2870 	return (0);
2871 }
2872 
2873 /*
2874  * Remove the specified MAC address and update the hardware
2875  * so that it no longer filters on that address.
2876  * Returns: 0, on success.
2877  */
2878 int
2879 hxge_m_mmac_remove(void *arg, mac_addr_slot_t slot)
2880 {
2881 	p_hxge_t	hxgep = arg;
2882 	hxge_mmac_t	*mmac_info;
2883 	int		err = 0;
2884 	hxge_status_t	status;
2885 
2886 	mutex_enter(hxgep->genlock);
2887 
2888 	/*
2889 	 * Make sure that hxge is initialized even if _start()
2890 	 * has not been called yet.
2891 	 */
2892 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2893 		status = hxge_init(hxgep);
2894 		if (status != HXGE_OK) {
2895 			mutex_exit(hxgep->genlock);
2896 			return (ENXIO);
2897 		}
2898 	}
2899 
2900 	mmac_info = &hxgep->hxge_mmac_info;
2901 	if (slot <= 0 || slot >= mmac_info->num_mmac) {
2902 		mutex_exit(hxgep->genlock);
2903 		return (EINVAL);
2904 	}
2905 
2906 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
2907 		if (hpi_pfc_mac_addr_disable(hxgep->hpi_handle, slot) ==
2908 		    HPI_SUCCESS) {
2909 			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
2910 			mmac_info->naddrfree++;
2911 			/*
2912 			 * Clear mac_pool[slot].addr so that kstat shows 0
2913 			 * alternate MAC address if the slot is not used.
2914 			 */
2915 			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
2916 			hxge_mmac_kstat_update(hxgep, slot);
2917 		} else {
2918 			err = EIO;
2919 		}
2920 	} else {
2921 		err = EINVAL;
2922 	}
2923 
2924 	mutex_exit(hxgep->genlock);
2925 	return (err);
2926 }
2927 
2928 /*
2929  * Modify a MAC address added by hxge_m_mmac_add().
2930  * Returns: 0, on success.
2931  */
2932 int
2933 hxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr)
2934 {
2935 	p_hxge_t	hxgep = arg;
2936 	mac_addr_slot_t	slot;
2937 	hxge_mmac_t	*mmac_info;
2938 	int		err = 0;
2939 	hxge_status_t	status;
2940 
2941 	if (!mac_unicst_verify(hxgep->mach, maddr->mma_addr,
2942 	    maddr->mma_addrlen))
2943 		return (EINVAL);
2944 
2945 	slot = maddr->mma_slot;
2946 
2947 	mutex_enter(hxgep->genlock);
2948 
2949 	/*
2950 	 * Make sure that hxge is initialized even if _start()
2951 	 * has not been called yet.
2952 	 */
2953 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2954 		status = hxge_init(hxgep);
2955 		if (status != HXGE_OK) {
2956 			mutex_exit(hxgep->genlock);
2957 			return (ENXIO);
2958 		}
2959 	}
2960 
2961 	mmac_info = &hxgep->hxge_mmac_info;
2962 	if (slot <= 0 || slot >= mmac_info->num_mmac) {
2963 		mutex_exit(hxgep->genlock);
2964 		return (EINVAL);
2965 	}
2966 
2967 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
2968 		if ((err = hxge_altmac_set(hxgep, maddr->mma_addr,
2969 		    slot)) == 0) {
2970 			bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr,
2971 			    ETHERADDRL);
2972 			hxge_mmac_kstat_update(hxgep, slot);
2973 		}
2974 	} else {
2975 		err = EINVAL;
2976 	}
2977 
2978 	mutex_exit(hxgep->genlock);
2979 	return (err);
2980 }
2981 
2982 /*
2983  * static int
2984  * hxge_m_mmac_get() - Get the MAC address and other information
2985  *	related to the slot.  mma_flags should be set to 0 in the call.
2986  *	Note: although kstat shows MAC address as zero when a slot is
2987  *	not used, Crossbow expects hxge_m_mmac_get to copy factory MAC
2988  *	to the caller as long as the slot is not using a user MAC address.
2989  *	The following table shows the rules,
2990  *
2991  *     					USED    VENDOR    mma_addr
2992  *	------------------------------------------------------------
2993  *	(1) Slot uses a user MAC:	yes      no     user MAC
2994  *	(2) Slot uses a factory MAC:    yes      yes    factory MAC
2995  *	(3) Slot is not used but is
2996  *	     factory MAC capable:	no       yes    factory MAC
2997  *	(4) Slot is not used and is
2998  *	     not factory MAC capable:   no       no	0
2999  *	------------------------------------------------------------
3000  */
3001 int
3002 hxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr)
3003 {
3004 	hxge_t		*hxgep = arg;
3005 	mac_addr_slot_t	slot;
3006 	hxge_mmac_t	*mmac_info;
3007 	hxge_status_t	status;
3008 
3009 	slot = maddr->mma_slot;
3010 
3011 	mutex_enter(hxgep->genlock);
3012 
3013 	/*
3014 	 * Make sure that hxge is initialized even if _start()
3015 	 * has not been called yet.
3016 	 */
3017 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
3018 		status = hxge_init(hxgep);
3019 		if (status != HXGE_OK) {
3020 			mutex_exit(hxgep->genlock);
3021 			return (ENXIO);
3022 		}
3023 	}
3024 
3025 	mmac_info = &hxgep->hxge_mmac_info;
3026 	if (slot <= 0 || slot >= mmac_info->num_mmac) {
3027 		mutex_exit(hxgep->genlock);
3028 		return (EINVAL);
3029 	}
3030 
3031 	maddr->mma_flags = 0;
3032 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
3033 		maddr->mma_flags |= MMAC_SLOT_USED;
3034 		bcopy(mmac_info->mac_pool[slot].addr,
3035 		    maddr->mma_addr, ETHERADDRL);
3036 		maddr->mma_addrlen = ETHERADDRL;
3037 	}
3038 
3039 	mutex_exit(hxgep->genlock);
3040 	return (0);
3041 }
3042 
3043 /*ARGSUSED*/
3044 boolean_t
3045 hxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
3046 {
3047 	p_hxge_t		hxgep = (p_hxge_t)arg;
3048 	uint32_t		*txflags = cap_data;
3049 	multiaddress_capab_t	*mmacp = cap_data;
3050 
3051 	switch (cap) {
3052 	case MAC_CAPAB_HCKSUM:
3053 		*txflags = HCKSUM_INET_PARTIAL;
3054 		break;
3055 
3056 	case MAC_CAPAB_POLL:
3057 		/*
3058 		 * There's nothing for us to fill in; simply returning B_TRUE
3059 		 * to state that we support polling is sufficient.
3060 		 */
3061 		break;
3062 
3063 	case MAC_CAPAB_MULTIADDRESS:
3064 		/*
3065 		 * The number of MAC addresses made available by
3066 		 * this capability is one less than the total as
3067 		 * the primary address in slot 0 is counted in
3068 		 * the total.
3069 		 */
3070 		mmacp->maddr_naddr = PFC_N_MAC_ADDRESSES - 1;
3071 		mmacp->maddr_naddrfree = hxgep->hxge_mmac_info.naddrfree;
3072 		mmacp->maddr_flag = 0;	/* No multiple factory macs */
3073 		mmacp->maddr_handle = hxgep;
3074 		mmacp->maddr_add = hxge_m_mmac_add;
3075 		mmacp->maddr_remove = hxge_m_mmac_remove;
3076 		mmacp->maddr_modify = hxge_m_mmac_modify;
3077 		mmacp->maddr_get = hxge_m_mmac_get;
3078 		mmacp->maddr_reserve = NULL;	/* No multiple factory macs */
3079 		break;
3080 	default:
3081 		return (B_FALSE);
3082 	}
3083 	return (B_TRUE);
3084 }
3085 
3086 static boolean_t
3087 hxge_param_locked(mac_prop_id_t pr_num)
3088 {
3089 	/*
3090 	 * All adv_* parameters are locked (read-only) while
3091 	 * the device is in any sort of loopback mode ...
3092 	 */
3093 	switch (pr_num) {
3094 		case MAC_PROP_ADV_1000FDX_CAP:
3095 		case MAC_PROP_EN_1000FDX_CAP:
3096 		case MAC_PROP_ADV_1000HDX_CAP:
3097 		case MAC_PROP_EN_1000HDX_CAP:
3098 		case MAC_PROP_ADV_100FDX_CAP:
3099 		case MAC_PROP_EN_100FDX_CAP:
3100 		case MAC_PROP_ADV_100HDX_CAP:
3101 		case MAC_PROP_EN_100HDX_CAP:
3102 		case MAC_PROP_ADV_10FDX_CAP:
3103 		case MAC_PROP_EN_10FDX_CAP:
3104 		case MAC_PROP_ADV_10HDX_CAP:
3105 		case MAC_PROP_EN_10HDX_CAP:
3106 		case MAC_PROP_AUTONEG:
3107 		case MAC_PROP_FLOWCTRL:
3108 			return (B_TRUE);
3109 	}
3110 	return (B_FALSE);
3111 }
3112 
3113 /*
3114  * callback functions for set/get of properties
3115  */
3116 static int
3117 hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3118     uint_t pr_valsize, const void *pr_val)
3119 {
3120 	hxge_t		*hxgep = barg;
3121 	p_hxge_stats_t	statsp;
3122 	int		err = 0;
3123 	uint32_t	new_mtu, old_framesize, new_framesize;
3124 
3125 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "==> hxge_m_setprop"));
3126 
3127 	statsp = hxgep->statsp;
3128 	mutex_enter(hxgep->genlock);
3129 	if (statsp->port_stats.lb_mode != hxge_lb_normal &&
3130 	    hxge_param_locked(pr_num)) {
3131 		/*
3132 		 * All adv_* parameters are locked (read-only)
3133 		 * while the device is in any sort of loopback mode.
3134 		 */
3135 		HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3136 		    "==> hxge_m_setprop: loopback mode: read only"));
3137 		mutex_exit(hxgep->genlock);
3138 		return (EBUSY);
3139 	}
3140 
3141 	switch (pr_num) {
3142 		/*
3143 		 * These properties either do not exist or are read-only
3144 		 */
3145 		case MAC_PROP_EN_1000FDX_CAP:
3146 		case MAC_PROP_EN_100FDX_CAP:
3147 		case MAC_PROP_EN_10FDX_CAP:
3148 		case MAC_PROP_EN_1000HDX_CAP:
3149 		case MAC_PROP_EN_100HDX_CAP:
3150 		case MAC_PROP_EN_10HDX_CAP:
3151 		case MAC_PROP_ADV_1000FDX_CAP:
3152 		case MAC_PROP_ADV_1000HDX_CAP:
3153 		case MAC_PROP_ADV_100FDX_CAP:
3154 		case MAC_PROP_ADV_100HDX_CAP:
3155 		case MAC_PROP_ADV_10FDX_CAP:
3156 		case MAC_PROP_ADV_10HDX_CAP:
3157 		case MAC_PROP_STATUS:
3158 		case MAC_PROP_SPEED:
3159 		case MAC_PROP_DUPLEX:
3160 		case MAC_PROP_AUTONEG:
3161 		/*
3162 		 * Flow control is handled in the shared domain and
3163 		 * it is read-only here.
3164 		 */
3165 		case MAC_PROP_FLOWCTRL:
3166 			err = EINVAL;
3167 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3168 			    "==> hxge_m_setprop:  read only property %d",
3169 			    pr_num));
3170 			break;
3171 
3172 		case MAC_PROP_MTU:
3173 			bcopy(pr_val, &new_mtu, sizeof (new_mtu));
3174 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3175 			    "==> hxge_m_setprop: set MTU: %d", new_mtu));
3176 
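			/*
			 * The MTU from the stack is a payload size; the
			 * VMAC is programmed with a full frame size, i.e.
			 * the MTU plus the fixed link-layer overhead
			 * (MTU_TO_FRAME_SIZE), bounded by MIN_FRAME_SIZE
			 * and MAX_FRAME_SIZE below.
			 */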
3177 			new_framesize = new_mtu + MTU_TO_FRAME_SIZE;
3178 			if (new_framesize == hxgep->vmac.maxframesize) {
3179 				err = 0;
3180 				break;
3181 			}
3182 
3183 			if (hxgep->hxge_mac_state == HXGE_MAC_STARTED) {
3184 				err = EBUSY;
3185 				break;
3186 			}
3187 
3188 			if (new_framesize < MIN_FRAME_SIZE ||
3189 			    new_framesize > MAX_FRAME_SIZE) {
3190 				err = EINVAL;
3191 				break;
3192 			}
3193 
3194 			old_framesize = hxgep->vmac.maxframesize;
3195 			hxgep->vmac.maxframesize = (uint16_t)new_framesize;
3196 
3197 			if (hxge_vmac_set_framesize(hxgep)) {
3198 				hxgep->vmac.maxframesize =
3199 				    (uint16_t)old_framesize;
3200 				err = EINVAL;
3201 				break;
3202 			}
3203 
3204 			err = mac_maxsdu_update(hxgep->mach, new_mtu);
3205 			if (err) {
3206 				hxgep->vmac.maxframesize =
3207 				    (uint16_t)old_framesize;
3208 				(void) hxge_vmac_set_framesize(hxgep);
3209 			}
3210 
3211 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3212 			    "==> hxge_m_setprop: set MTU: %d maxframe %d",
3213 			    new_mtu, hxgep->vmac.maxframesize));
3214 			break;
3215 
3216 		case MAC_PROP_PRIVATE:
3217 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3218 			    "==> hxge_m_setprop: private property"));
3219 			err = hxge_set_priv_prop(hxgep, pr_name, pr_valsize,
3220 			    pr_val);
3221 			break;
3222 
3223 		default:
3224 			err = ENOTSUP;
3225 			break;
3226 	}
3227 
3228 	mutex_exit(hxgep->genlock);
3229 
3230 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3231 	    "<== hxge_m_setprop (return %d)", err));
3232 
3233 	return (err);
3234 }
3235 
3236 /* ARGSUSED */
3237 static int
3238 hxge_get_def_val(hxge_t *hxgep, mac_prop_id_t pr_num, uint_t pr_valsize,
3239     void *pr_val)
3240 {
3241 	int		err = 0;
3242 	link_flowctrl_t	fl;
3243 
3244 	switch (pr_num) {
3245 	case MAC_PROP_DUPLEX:
3246 		*(uint8_t *)pr_val = 2;
3247 		break;
3248 	case MAC_PROP_AUTONEG:
3249 		*(uint8_t *)pr_val = 0;
3250 		break;
3251 	case MAC_PROP_FLOWCTRL:
3252 		if (pr_valsize < sizeof (link_flowctrl_t))
3253 			return (EINVAL);
3254 		fl = LINK_FLOWCTRL_TX;
3255 		bcopy(&fl, pr_val, sizeof (fl));
3256 		break;
3257 	default:
3258 		err = ENOTSUP;
3259 		break;
3260 	}
3261 	return (err);
3262 }
3263 
3264 static int
3265 hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3266     uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
3267 {
3268 	hxge_t 		*hxgep = barg;
3269 	p_hxge_stats_t	statsp = hxgep->statsp;
3270 	int		err = 0;
3271 	link_flowctrl_t fl;
3272 	uint64_t	tmp = 0;
3273 	link_state_t	ls;
3274 
3275 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3276 	    "==> hxge_m_getprop: pr_num %d", pr_num));
3277 
3278 	if (pr_valsize == 0)
3279 		return (EINVAL);
3280 
3281 	*perm = MAC_PROP_PERM_RW;
3282 
3283 	if ((pr_flags & MAC_PROP_DEFAULT) && (pr_num != MAC_PROP_PRIVATE)) {
3284 		err = hxge_get_def_val(hxgep, pr_num, pr_valsize, pr_val);
3285 		return (err);
3286 	}
3287 
3288 	bzero(pr_val, pr_valsize);
3289 	switch (pr_num) {
3290 		case MAC_PROP_DUPLEX:
3291 			*perm = MAC_PROP_PERM_READ;
3292 			*(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
3293 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3294 			    "==> hxge_m_getprop: duplex mode %d",
3295 			    *(uint8_t *)pr_val));
3296 			break;
3297 
3298 		case MAC_PROP_SPEED:
3299 			*perm = MAC_PROP_PERM_READ;
3300 			if (pr_valsize < sizeof (uint64_t))
3301 				return (EINVAL);
3302 			tmp = statsp->mac_stats.link_speed * 1000000ull;
3303 			bcopy(&tmp, pr_val, sizeof (tmp));
3304 			break;
3305 
3306 		case MAC_PROP_STATUS:
3307 			*perm = MAC_PROP_PERM_READ;
3308 			if (pr_valsize < sizeof (link_state_t))
3309 				return (EINVAL);
3310 			if (!statsp->mac_stats.link_up)
3311 				ls = LINK_STATE_DOWN;
3312 			else
3313 				ls = LINK_STATE_UP;
3314 			bcopy(&ls, pr_val, sizeof (ls));
3315 			break;
3316 
3317 		case MAC_PROP_FLOWCTRL:
3318 			/*
3319 			 * Flow control is supported by the shared domain and
3320 			 * it is currently transmit-only.
3321 			 */
3322 			*perm = MAC_PROP_PERM_READ;
3323 			if (pr_valsize < sizeof (link_flowctrl_t))
3324 				return (EINVAL);
3325 			fl = LINK_FLOWCTRL_TX;
3326 			bcopy(&fl, pr_val, sizeof (fl));
3327 			break;
3328 		case MAC_PROP_AUTONEG:
3329 			/* 10G link only and it is not negotiable */
3330 			*perm = MAC_PROP_PERM_READ;
3331 			*(uint8_t *)pr_val = 0;
3332 			break;
3333 		case MAC_PROP_ADV_1000FDX_CAP:
3334 		case MAC_PROP_ADV_100FDX_CAP:
3335 		case MAC_PROP_ADV_10FDX_CAP:
3336 		case MAC_PROP_ADV_1000HDX_CAP:
3337 		case MAC_PROP_ADV_100HDX_CAP:
3338 		case MAC_PROP_ADV_10HDX_CAP:
3339 		case MAC_PROP_EN_1000FDX_CAP:
3340 		case MAC_PROP_EN_100FDX_CAP:
3341 		case MAC_PROP_EN_10FDX_CAP:
3342 		case MAC_PROP_EN_1000HDX_CAP:
3343 		case MAC_PROP_EN_100HDX_CAP:
3344 		case MAC_PROP_EN_10HDX_CAP:
3345 			err = ENOTSUP;
3346 			break;
3347 
3348 		case MAC_PROP_PRIVATE:
3349 			err = hxge_get_priv_prop(hxgep, pr_name, pr_flags,
3350 			    pr_valsize, pr_val);
3351 			break;
3352 		default:
3353 			err = EINVAL;
3354 			break;
3355 	}
3356 
3357 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "<== hxge_m_getprop"));
3358 
3359 	return (err);
3360 }
3361 
3362 /* ARGSUSED */
3363 static int
3364 hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_valsize,
3365     const void *pr_val)
3366 {
3367 	p_hxge_param_t	param_arr = hxgep->param_arr;
3368 	int		err = 0;
3369 
3370 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3371 	    "==> hxge_set_priv_prop: name %s (value %s)", pr_name, pr_val));
3372 
3373 	if (pr_val == NULL) {
3374 		return (EINVAL);
3375 	}
3376 
3377 	/* Blanking */
3378 	if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3379 		err = hxge_param_rx_intr_time(hxgep, NULL, NULL,
3380 		    (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_time]);
3381 	} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3382 		err = hxge_param_rx_intr_pkts(hxgep, NULL, NULL,
3383 		    (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_pkts]);
3384 
3385 	/* Classification */
3386 	} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3387 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3388 		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3389 	} else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3390 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3391 		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3392 	} else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3393 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3394 		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3395 	} else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3396 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3397 		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3398 	} else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3399 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3400 		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3401 	} else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3402 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3403 		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3404 	} else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3405 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3406 		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3407 	} else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3408 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3409 		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3410 	} else {
3411 		err = EINVAL;
3412 	}
3413 
3414 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3415 	    "<== hxge_set_priv_prop: err %d", err));
3416 
3417 	return (err);
3418 }
3419 
3420 static int
3421 hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_flags,
3422     uint_t pr_valsize, void *pr_val)
3423 {
3424 	p_hxge_param_t	param_arr = hxgep->param_arr;
3425 	char		valstr[MAXNAMELEN];
3426 	int		err = 0;
3427 	uint_t		strsize;
3428 	int		value = 0;
3429 
3430 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3431 	    "==> hxge_get_priv_prop: property %s", pr_name));
3432 
3433 	if (pr_flags & MAC_PROP_DEFAULT) {
3434 		/* Receive Interrupt Blanking Parameters */
3435 		if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3436 			value = RXDMA_RCR_TO_DEFAULT;
3437 		} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3438 			value = RXDMA_RCR_PTHRES_DEFAULT;
3439 
3440 		/* Classification and Load Distribution Configuration */
3441 		} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0 ||
3442 		    strcmp(pr_name, "_class_opt_ipv4_udp") == 0 ||
3443 		    strcmp(pr_name, "_class_opt_ipv4_ah") == 0 ||
3444 		    strcmp(pr_name, "_class_opt_ipv4_sctp") == 0 ||
3445 		    strcmp(pr_name, "_class_opt_ipv6_tcp") == 0 ||
3446 		    strcmp(pr_name, "_class_opt_ipv6_udp") == 0 ||
3447 		    strcmp(pr_name, "_class_opt_ipv6_ah") == 0 ||
3448 		    strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3449 			value = HXGE_CLASS_TCAM_LOOKUP;
3450 		} else {
3451 			err = EINVAL;
3452 		}
3453 	} else {
3454 		/* Receive Interrupt Blanking Parameters */
3455 		if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3456 			value = hxgep->intr_timeout;
3457 		} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3458 			value = hxgep->intr_threshold;
3459 
3460 		/* Classification and Load Distribution Configuration */
3461 		} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3462 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3463 			    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3464 
3465 			value = (int)param_arr[param_class_opt_ipv4_tcp].value;
3466 		} else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3467 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3468 			    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3469 
3470 			value = (int)param_arr[param_class_opt_ipv4_udp].value;
3471 		} else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3472 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3473 			    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3474 
3475 			value = (int)param_arr[param_class_opt_ipv4_ah].value;
3476 		} else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3477 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3478 			    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3479 
3480 			value = (int)param_arr[param_class_opt_ipv4_sctp].value;
3481 		} else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3482 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3483 			    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3484 
3485 			value = (int)param_arr[param_class_opt_ipv6_tcp].value;
3486 		} else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3487 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3488 			    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3489 
3490 			value = (int)param_arr[param_class_opt_ipv6_udp].value;
3491 		} else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3492 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3493 			    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3494 
3495 			value = (int)param_arr[param_class_opt_ipv6_ah].value;
3496 		} else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3497 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3498 			    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3499 
3500 			value = (int)param_arr[param_class_opt_ipv6_sctp].value;
3501 		} else {
3502 			err = EINVAL;
3503 		}
3504 	}
3505 
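	/*
	 * Private (dladm) property values are exchanged as strings, so
	 * the integer result is rendered in hex (e.g. a value of 8 is
	 * returned as "0x8") and copied out only if it fits the
	 * caller's buffer.
	 */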
3506 	if (err == 0) {
3507 		(void) snprintf(valstr, sizeof (valstr), "0x%x", value);
3508 
3509 		strsize = (uint_t)strlen(valstr);
3510 		if (pr_valsize < strsize) {
3511 			err = ENOBUFS;
3512 		} else {
3513 			(void) strlcpy(pr_val, valstr, pr_valsize);
3514 		}
3515 	}
3516 
3517 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3518 	    "<== hxge_get_priv_prop: return %d", err));
3519 
3520 	return (err);
3521 }
3522 /*
3523  * Module loading and removing entry points.
3524  */
3525 DDI_DEFINE_STREAM_OPS(hxge_dev_ops, nulldev, nulldev, hxge_attach, hxge_detach,
3526     nodev, NULL, D_MP, NULL, NULL);
3527 
3528 extern struct mod_ops mod_driverops;
3529 
3530 #define	HXGE_DESC_VER	"HXGE 10Gb Ethernet Driver"
3531 
3532 /*
3533  * Module linkage information for the kernel.
3534  */
3535 static struct modldrv hxge_modldrv = {
3536 	&mod_driverops,
3537 	HXGE_DESC_VER,
3538 	&hxge_dev_ops
3539 };
3540 
3541 static struct modlinkage modlinkage = {
3542 	MODREV_1, (void *) &hxge_modldrv, NULL
3543 };
3544 
3545 int
3546 _init(void)
3547 {
3548 	int status;
3549 
3550 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
3551 	mac_init_ops(&hxge_dev_ops, "hxge");
3552 	status = ddi_soft_state_init(&hxge_list, sizeof (hxge_t), 0);
3553 	if (status != 0) {
3554 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
3555 		    "failed to init device soft state"));
3556 		mac_fini_ops(&hxge_dev_ops);
3557 		goto _init_exit;
3558 	}
3559 
3560 	status = mod_install(&modlinkage);
3561 	if (status != 0) {
3562 		ddi_soft_state_fini(&hxge_list);
3563 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "Mod install failed"));
3564 		goto _init_exit;
3565 	}
3566 
3567 	MUTEX_INIT(&hxge_common_lock, NULL, MUTEX_DRIVER, NULL);
3568 
3569 _init_exit:
3570 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
3571 
3572 	return (status);
3573 }
3574 
3575 int
3576 _fini(void)
3577 {
3578 	int status;
3579 
3580 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
3581 
3582 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
3583 
3584 	if (hxge_mblks_pending)
3585 		return (EBUSY);
3586 
3587 	status = mod_remove(&modlinkage);
3588 	if (status != 0) {
3589 		HXGE_DEBUG_MSG((NULL, MOD_CTL,
3590 		    "Module removal failed 0x%08x", status));
3591 		goto _fini_exit;
3592 	}
3593 
3594 	mac_fini_ops(&hxge_dev_ops);
3595 
3596 	ddi_soft_state_fini(&hxge_list);
3597 
3598 	MUTEX_DESTROY(&hxge_common_lock);
3599 
3600 _fini_exit:
3601 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));
3602 
3603 	return (status);
3604 }
3605 
3606 int
3607 _info(struct modinfo *modinfop)
3608 {
3609 	int status;
3610 
3611 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
3612 	status = mod_info(&modlinkage, modinfop);
3613 	HXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
3614 
3615 	return (status);
3616 }
3617 
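/*
 * Reset the interrupt bookkeeping, query which interrupt types the
 * platform supports, pick one according to the hxge_msi_enable tunable
 * (1 prefers MSI, 2 prefers MSI-X, anything else is FIXED), and
 * register the vectors through hxge_add_intrs_adv().
 */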
3618 /*ARGSUSED*/
3619 hxge_status_t
3620 hxge_add_intrs(p_hxge_t hxgep)
3621 {
3622 	int		intr_types;
3623 	int		type = 0;
3624 	int		ddi_status = DDI_SUCCESS;
3625 	hxge_status_t	status = HXGE_OK;
3626 
3627 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs"));
3628 
3629 	hxgep->hxge_intr_type.intr_registered = B_FALSE;
3630 	hxgep->hxge_intr_type.intr_enabled = B_FALSE;
3631 	hxgep->hxge_intr_type.msi_intx_cnt = 0;
3632 	hxgep->hxge_intr_type.intr_added = 0;
3633 	hxgep->hxge_intr_type.niu_msi_enable = B_FALSE;
3634 	hxgep->hxge_intr_type.intr_type = 0;
3635 
3636 	if (hxge_msi_enable) {
3637 		hxgep->hxge_intr_type.niu_msi_enable = B_TRUE;
3638 	}
3639 
3640 	/* Get the supported interrupt types */
3641 	if ((ddi_status = ddi_intr_get_supported_types(hxgep->dip, &intr_types))
3642 	    != DDI_SUCCESS) {
3643 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_intrs: "
3644 		    "ddi_intr_get_supported_types failed: status 0x%08x",
3645 		    ddi_status));
3646 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3647 	}
3648 
3649 	hxgep->hxge_intr_type.intr_types = intr_types;
3650 
3651 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3652 	    "ddi_intr_get_supported_types: 0x%08x", intr_types));
3653 
3654 	/*
3655 	 * Pick the interrupt type to use, based on hxge_msi_enable:
3656 	 *	1 - MSI
3657 	 *	2 - MSI-X
3658 	 *	others - FIXED (INTx emulation)
3659 	 */
3660 	switch (hxge_msi_enable) {
3661 	default:
3662 		type = DDI_INTR_TYPE_FIXED;
3663 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3664 		    "use fixed (intx emulation) type %08x", type));
3665 		break;
3666 
3667 	case 2:
3668 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3669 		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
3670 		if (intr_types & DDI_INTR_TYPE_MSIX) {
3671 			type = DDI_INTR_TYPE_MSIX;
3672 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3673 			    "==> hxge_add_intrs: "
3674 			    "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3675 		} else if (intr_types & DDI_INTR_TYPE_MSI) {
3676 			type = DDI_INTR_TYPE_MSI;
3677 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3678 			    "==> hxge_add_intrs: "
3679 			    "ddi_intr_get_supported_types: MSI 0x%08x", type));
3680 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
3681 			type = DDI_INTR_TYPE_FIXED;
3682 			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3683 			    "ddi_intr_get_supported_types: FIXED 0x%08x", type));
3684 		}
3685 		break;
3686 
3687 	case 1:
3688 		if (intr_types & DDI_INTR_TYPE_MSI) {
3689 			type = DDI_INTR_TYPE_MSI;
3690 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3691 			    "==> hxge_add_intrs: "
3692 			    "ddi_intr_get_supported_types: MSI 0x%08x", type));
3693 		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
3694 			type = DDI_INTR_TYPE_MSIX;
3695 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3696 			    "==> hxge_add_intrs: "
3697 			    "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3698 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
3699 			type = DDI_INTR_TYPE_FIXED;
3700 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3701 			    "==> hxge_add_intrs: "
3702 			    "ddi_intr_get_supported_types: FIXED 0x%08x", type));
3703 		}
3704 	}
3705 
3706 	hxgep->hxge_intr_type.intr_type = type;
3707 	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
3708 	    type == DDI_INTR_TYPE_FIXED) &&
3709 	    hxgep->hxge_intr_type.niu_msi_enable) {
3710 		if ((status = hxge_add_intrs_adv(hxgep)) != HXGE_OK) {
3711 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3712 			    " hxge_add_intrs: "
3713 			    " hxge_add_intrs_adv failed: status 0x%08x",
3714 			    status));
3715 			return (status);
3716 		} else {
3717 			HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_intrs: "
3718 			    "interrupts registered : type %d", type));
3719 			hxgep->hxge_intr_type.intr_registered = B_TRUE;
3720 
3721 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
3722 			    "\nAdded advanced hxge add_intr_adv "
3723 			    "intr type 0x%x\n", type));
3724 
3725 			return (status);
3726 		}
3727 	}
3728 
3729 	if (!hxgep->hxge_intr_type.intr_registered) {
3730 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3731 		    "==> hxge_add_intrs: failed to register interrupts"));
3732 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3733 	}
3734 
3735 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs"));
3736 
3737 	return (status);
3738 }
3739 
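/*
 * Register a single low-priority soft interrupt whose handler,
 * hxge_reschedule(), is used to reschedule transmit processing.
 */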
3740 /*ARGSUSED*/
3741 static hxge_status_t
3742 hxge_add_soft_intrs(p_hxge_t hxgep)
3743 {
3744 	int		ddi_status = DDI_SUCCESS;
3745 	hxge_status_t	status = HXGE_OK;
3746 
3747 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_soft_intrs"));
3748 
3749 	hxgep->resched_id = NULL;
3750 	hxgep->resched_running = B_FALSE;
3751 	ddi_status = ddi_add_softintr(hxgep->dip, DDI_SOFTINT_LOW,
3752 	    &hxgep->resched_id, NULL, NULL, hxge_reschedule, (caddr_t)hxgep);
3753 	if (ddi_status != DDI_SUCCESS) {
3754 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_soft_intrs: "
3755 		    "ddi_add_softintr failed: status 0x%08x", ddi_status));
3756 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3757 	}
3758 
3759 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_add_soft_intrs"));
3760 
3761 	return (status);
3762 }
3763 
3764 /*ARGSUSED*/
3765 static hxge_status_t
3766 hxge_add_intrs_adv(p_hxge_t hxgep)
3767 {
3768 	int		intr_type;
3769 	p_hxge_intr_t	intrp;
3770 	hxge_status_t	status;
3771 
3772 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv"));
3773 
3774 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3775 	intr_type = intrp->intr_type;
3776 
3777 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv: type 0x%x",
3778 	    intr_type));
3779 
3780 	switch (intr_type) {
3781 	case DDI_INTR_TYPE_MSI:		/* 0x2 */
3782 	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
3783 		status = hxge_add_intrs_adv_type(hxgep, intr_type);
3784 		break;
3785 
3786 	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
3787 		status = hxge_add_intrs_adv_type_fix(hxgep, intr_type);
3788 		break;
3789 
3790 	default:
3791 		status = HXGE_ERROR;
3792 		break;
3793 	}
3794 
3795 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv"));
3796 
3797 	return (status);
3798 }
3799 
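/*
 * MSI/MSI-X registration: determine how many vectors exist and are
 * available, allocate them, fetch the interrupt priority, bind the
 * vectors to logical device groups via hxge_ldgv_init(), and attach a
 * handler to each vector.  Any failure unwinds the work done so far.
 */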
3800 /*ARGSUSED*/
3801 static hxge_status_t
3802 hxge_add_intrs_adv_type(p_hxge_t hxgep, uint32_t int_type)
3803 {
3804 	dev_info_t	*dip = hxgep->dip;
3805 	p_hxge_ldg_t	ldgp;
3806 	p_hxge_intr_t	intrp;
3807 	uint_t		*inthandler;
3808 	void		*arg1, *arg2;
3809 	int		behavior;
3810 	int		nintrs, navail;
3811 	int		nactual, nrequired;
3812 	int		inum = 0;
3813 	int		loop = 0;
3814 	int		x, y;
3815 	int		ddi_status = DDI_SUCCESS;
3816 	hxge_status_t	status = HXGE_OK;
3817 
3818 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type"));
3819 
3820 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3821 
3822 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3823 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3824 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3825 		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
3826 		    "nintrs: %d", ddi_status, nintrs));
3827 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3828 	}
3829 
3830 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
3831 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
3832 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3833 		    "ddi_intr_get_navail() failed, status: 0x%x, "
3834 		    "navail: %d", ddi_status, navail));
3835 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3836 	}
3837 
3838 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3839 	    "ddi_intr_get_navail() returned: intr type %d nintrs %d, navail %d",
3840 	    int_type, nintrs, navail));
3841 
3842 	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
3843 		/* MSI requires a power-of-two vector count; round down */
3844 		if ((navail & 16) == 16) {
3845 			navail = 16;
3846 		} else if ((navail & 8) == 8) {
3847 			navail = 8;
3848 		} else if ((navail & 4) == 4) {
3849 			navail = 4;
3850 		} else if ((navail & 2) == 2) {
3851 			navail = 2;
3852 		} else {
3853 			navail = 1;
3854 		}
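		/*
		 * For the vector counts this device can see (navail < 32)
		 * the ladder above is equivalent to
		 * navail = 1 << (highbit(navail) - 1), i.e. the largest
		 * power of two not exceeding navail.
		 */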
3855 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3856 		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
3857 		    "navail %d", nintrs, navail));
3858 	}
3859 
3860 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3861 	    "requesting: intr type %d nintrs %d, navail %d",
3862 	    int_type, nintrs, navail));
3863 
3864 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
3865 	    DDI_INTR_ALLOC_NORMAL);
3866 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
3867 	intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP);
3868 
3869 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
3870 	    navail, &nactual, behavior);
3871 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
3872 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3873 		    " ddi_intr_alloc() failed: %d", ddi_status));
3874 		kmem_free(intrp->htable, intrp->intr_size);
3875 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3876 	}
3877 
3878 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3879 	    "ddi_intr_alloc() returned: navail %d nactual %d",
3880 	    navail, nactual));
3881 
3882 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
3883 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
3884 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3885 		    " ddi_intr_get_pri() failed: %d", ddi_status));
3886 		/* Free already allocated interrupts */
3887 		for (y = 0; y < nactual; y++) {
3888 			(void) ddi_intr_free(intrp->htable[y]);
3889 		}
3890 
3891 		kmem_free(intrp->htable, intrp->intr_size);
3892 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3893 	}
3894 
3895 	nrequired = 0;
3896 	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
3897 	if (status != HXGE_OK) {
3898 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3899 		    "hxge_add_intrs_adv_type: hxge_ldgv_init "
3900 		    "failed: 0x%x", status));
3901 		/* Free already allocated interrupts */
3902 		for (y = 0; y < nactual; y++) {
3903 			(void) ddi_intr_free(intrp->htable[y]);
3904 		}
3905 
3906 		kmem_free(intrp->htable, intrp->intr_size);
3907 		return (status);
3908 	}
3909 
3910 	ldgp = hxgep->ldgvp->ldgp;
3911 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3912 	    "After hxge_ldgv_init(): nreq %d nactual %d", nrequired, nactual));
3913 
3914 	if (nactual < nrequired)
3915 		loop = nactual;
3916 	else
3917 		loop = nrequired;
3918 
3919 	for (x = 0; x < loop; x++, ldgp++) {
3920 		ldgp->vector = (uint8_t)x;
3921 		arg1 = ldgp->ldvp;
3922 		arg2 = hxgep;
3923 		if (ldgp->nldvs == 1) {
3924 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
3925 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3926 			    "hxge_add_intrs_adv_type: arg1 $%p arg2 $%p: "
3927 			    "1-1 int handler (entry %d)\n",
3928 			    arg1, arg2, x));
3929 		} else if (ldgp->nldvs > 1) {
3930 			inthandler = (uint_t *)ldgp->sys_intr_handler;
3931 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3932 			    "hxge_add_intrs_adv_type: arg1 $%p arg2 $%p: "
3933 			    "nldvs %d int handler (entry %d)\n",
3934 			    arg1, arg2, ldgp->nldvs, x));
3935 		}
3936 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3937 		    "==> hxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
3938 		    "htable 0x%llx", x, intrp->htable[x]));
3939 
3940 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
3941 		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
3942 		    DDI_SUCCESS) {
3943 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3944 			    "==> hxge_add_intrs_adv_type: failed #%d "
3945 			    "status 0x%x", x, ddi_status));
3946 			for (y = 0; y < intrp->intr_added; y++) {
3947 				(void) ddi_intr_remove_handler(
3948 				    intrp->htable[y]);
3949 			}
3950 
3951 			/* Free already allocated intr */
3952 			for (y = 0; y < nactual; y++) {
3953 				(void) ddi_intr_free(intrp->htable[y]);
3954 			}
3955 			kmem_free(intrp->htable, intrp->intr_size);
3956 
3957 			(void) hxge_ldgv_uninit(hxgep);
3958 
3959 			return (HXGE_ERROR | HXGE_DDI_FAILED);
3960 		}
3961 
3962 		intrp->intr_added++;
3963 	}
3964 	intrp->msi_intx_cnt = nactual;
3965 
3966 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3967 	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
3968 	    navail, nactual, intrp->msi_intx_cnt, intrp->intr_added));
3969 
3970 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
3971 	(void) hxge_intr_ldgv_init(hxgep);
3972 
3973 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type"));
3974 
3975 	return (status);
3976 }
3977 
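/*
 * Fixed (INTx) registration: the same allocate/prioritize/bind/attach
 * sequence as hxge_add_intrs_adv_type(), except the vectors are
 * allocated with DDI_INTR_ALLOC_STRICT and a handler is added for
 * every required logical device group.
 */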
3978 /*ARGSUSED*/
3979 static hxge_status_t
3980 hxge_add_intrs_adv_type_fix(p_hxge_t hxgep, uint32_t int_type)
3981 {
3982 	dev_info_t	*dip = hxgep->dip;
3983 	p_hxge_ldg_t	ldgp;
3984 	p_hxge_intr_t	intrp;
3985 	uint_t		*inthandler;
3986 	void		*arg1, *arg2;
3987 	int		behavior;
3988 	int		nintrs, navail;
3989 	int		nactual, nrequired;
3990 	int		inum = 0;
3991 	int		x, y;
3992 	int		ddi_status = DDI_SUCCESS;
3993 	hxge_status_t	status = HXGE_OK;
3994 
3995 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type_fix"));
3996 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3997 
3998 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3999 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
4000 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
4001 		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
4002 		    "nintrs: %d", ddi_status, nintrs));
4003 		return (HXGE_ERROR | HXGE_DDI_FAILED);
4004 	}
4005 
4006 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
4007 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
4008 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4009 		    "ddi_intr_get_navail() failed, status: 0x%x, "
4010 		    "navail: %d", ddi_status, navail));
4011 		return (HXGE_ERROR | HXGE_DDI_FAILED);
4012 	}
4013 
4014 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
4015 	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
4016 	    nintrs, navail));
4017 
4018 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
4019 	    DDI_INTR_ALLOC_NORMAL);
4020 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
4021 	intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP);
4022 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
4023 	    navail, &nactual, behavior);
4024 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
4025 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4026 		    " ddi_intr_alloc() failed: %d", ddi_status));
4027 		kmem_free(intrp->htable, intrp->intr_size);
4028 		return (HXGE_ERROR | HXGE_DDI_FAILED);
4029 	}
4030 
4031 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
4032 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
4033 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4034 		    " ddi_intr_get_pri() failed: %d", ddi_status));
4035 		/* Free already allocated interrupts */
4036 		for (y = 0; y < nactual; y++) {
4037 			(void) ddi_intr_free(intrp->htable[y]);
4038 		}
4039 
4040 		kmem_free(intrp->htable, intrp->intr_size);
4041 		return (HXGE_ERROR | HXGE_DDI_FAILED);
4042 	}
4043 
4044 	nrequired = 0;
4045 	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
4046 	if (status != HXGE_OK) {
4047 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4048 		    "hxge_add_intrs_adv_type_fix:hxge_ldgv_init "
4049 		    "failed: 0x%x", status));
4050 		/* Free already allocated interrupts */
4051 		for (y = 0; y < nactual; y++) {
4052 			(void) ddi_intr_free(intrp->htable[y]);
4053 		}
4054 
4055 		kmem_free(intrp->htable, intrp->intr_size);
4056 		return (status);
4057 	}
4058 
4059 	ldgp = hxgep->ldgvp->ldgp;
4060 	for (x = 0; x < nrequired; x++, ldgp++) {
4061 		ldgp->vector = (uint8_t)x;
4062 		arg1 = ldgp->ldvp;
4063 		arg2 = hxgep;
4064 		if (ldgp->nldvs == 1) {
4065 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
4066 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
4067 			    "hxge_add_intrs_adv_type_fix: "
4068 			    "1-1 int handler(%d) ldg %d ldv %d "
4069 			    "arg1 $%p arg2 $%p\n",
4070 			    x, ldgp->ldg, ldgp->ldvp->ldv, arg1, arg2));
4071 		} else if (ldgp->nldvs > 1) {
4072 			inthandler = (uint_t *)ldgp->sys_intr_handler;
4073 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
4074 			    "hxge_add_intrs_adv_type_fix: "
4075 			    "shared int handler(%d) nldvs %d ldg %d ldv %d "
4076 			    "arg1 $%p arg2 $%p\n",
4077 			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
4078 			    arg1, arg2));
4079 		}
4080 
4081 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
4082 		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
4083 		    DDI_SUCCESS) {
4084 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4085 			    "==> hxge_add_intrs_adv_type_fix: failed #%d "
4086 			    "status 0x%x", x, ddi_status));
4087 			for (y = 0; y < intrp->intr_added; y++) {
4088 				(void) ddi_intr_remove_handler(
4089 				    intrp->htable[y]);
4090 			}
4091 			for (y = 0; y < nactual; y++) {
4092 				(void) ddi_intr_free(intrp->htable[y]);
4093 			}
4094 			/* Free already allocated intr */
4095 			kmem_free(intrp->htable, intrp->intr_size);
4096 
4097 			(void) hxge_ldgv_uninit(hxgep);
4098 
4099 			return (HXGE_ERROR | HXGE_DDI_FAILED);
4100 		}
4101 		intrp->intr_added++;
4102 	}
4103 
4104 	intrp->msi_intx_cnt = nactual;
4105 
4106 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
4107 
4108 	status = hxge_intr_ldgv_init(hxgep);
4109 
4110 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type_fix"));
4111 
4112 	return (status);
4113 }
4114 
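/*
 * Tear down interrupts in the reverse of registration order: disable
 * them (block disable when the capability allows), remove each
 * handler, free each vector, release the handle table, and finally
 * uninitialize the logical device groups.
 */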
4115 /*ARGSUSED*/
4116 static void
4117 hxge_remove_intrs(p_hxge_t hxgep)
4118 {
4119 	int		i, inum;
4120 	p_hxge_intr_t	intrp;
4121 
4122 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs"));
4123 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4124 	if (!intrp->intr_registered) {
4125 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
4126 		    "<== hxge_remove_intrs: interrupts not registered"));
4127 		return;
4128 	}
4129 
4130 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs:advanced"));
4131 
4132 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4133 		(void) ddi_intr_block_disable(intrp->htable,
4134 		    intrp->intr_added);
4135 	} else {
4136 		for (i = 0; i < intrp->intr_added; i++) {
4137 			(void) ddi_intr_disable(intrp->htable[i]);
4138 		}
4139 	}
4140 
4141 	for (inum = 0; inum < intrp->intr_added; inum++) {
4142 		if (intrp->htable[inum]) {
4143 			(void) ddi_intr_remove_handler(intrp->htable[inum]);
4144 		}
4145 	}
4146 
4147 	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
4148 		if (intrp->htable[inum]) {
4149 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
4150 			    "hxge_remove_intrs: ddi_intr_free inum %d "
4151 			    "msi_intx_cnt %d intr_added %d",
4152 			    inum, intrp->msi_intx_cnt, intrp->intr_added));
4153 
4154 			(void) ddi_intr_free(intrp->htable[inum]);
4155 		}
4156 	}
4157 
4158 	kmem_free(intrp->htable, intrp->intr_size);
4159 	intrp->intr_registered = B_FALSE;
4160 	intrp->intr_enabled = B_FALSE;
4161 	intrp->msi_intx_cnt = 0;
4162 	intrp->intr_added = 0;
4163 
4164 	(void) hxge_ldgv_uninit(hxgep);
4165 
4166 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_intrs"));
4167 }
4168 
4169 /*ARGSUSED*/
4170 static void
4171 hxge_remove_soft_intrs(p_hxge_t hxgep)
4172 {
4173 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_soft_intrs"));
4174 
4175 	if (hxgep->resched_id) {
4176 		ddi_remove_softintr(hxgep->resched_id);
4177 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
4178 		    "==> hxge_remove_soft_intrs: removed"));
4179 		hxgep->resched_id = NULL;
4180 	}
4181 
4182 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_soft_intrs"));
4183 }
4184 
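/*
 * Enable all registered interrupts, using a single block enable when
 * DDI_INTR_FLAG_BLOCK is supported and per-vector ddi_intr_enable()
 * calls otherwise.
 */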
4185 /*ARGSUSED*/
4186 void
4187 hxge_intrs_enable(p_hxge_t hxgep)
4188 {
4189 	p_hxge_intr_t	intrp;
4190 	int		i;
4191 	int		status;
4192 
4193 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable"));
4194 
4195 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4196 
4197 	if (!intrp->intr_registered) {
4198 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_intrs_enable: "
4199 		    "interrupts are not registered"));
4200 		return;
4201 	}
4202 
4203 	if (intrp->intr_enabled) {
4204 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
4205 		    "<== hxge_intrs_enable: already enabled"));
4206 		return;
4207 	}
4208 
4209 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4210 		status = ddi_intr_block_enable(intrp->htable,
4211 		    intrp->intr_added);
4212 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
4213 		    "block enable - status 0x%x total inums #%d\n",
4214 		    status, intrp->intr_added));
4215 	} else {
4216 		for (i = 0; i < intrp->intr_added; i++) {
4217 			status = ddi_intr_enable(intrp->htable[i]);
4218 			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
4219 			    "ddi_intr_enable:enable - status 0x%x "
4220 			    "total inums %d enable inum #%d\n",
4221 			    status, intrp->intr_added, i));
4222 			if (status == DDI_SUCCESS) {
4223 				intrp->intr_enabled = B_TRUE;
4224 			}
4225 		}
4226 	}
4227 
4228 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_enable"));
4229 }
4230 
4231 /*ARGSUSED*/
4232 static void
4233 hxge_intrs_disable(p_hxge_t hxgep)
4234 {
4235 	p_hxge_intr_t	intrp;
4236 	int		i;
4237 
4238 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_disable"));
4239 
4240 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4241 
4242 	if (!intrp->intr_registered) {
4243 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable: "
4244 		    "interrupts are not registered"));
4245 		return;
4246 	}
4247 
4248 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4249 		(void) ddi_intr_block_disable(intrp->htable,
4250 		    intrp->intr_added);
4251 	} else {
4252 		for (i = 0; i < intrp->intr_added; i++) {
4253 			(void) ddi_intr_disable(intrp->htable[i]);
4254 		}
4255 	}
4256 
4257 	intrp->intr_enabled = B_FALSE;
4258 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable"));
4259 }
4260 
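/*
 * Register this instance with the GLDv3 framework: fill in a
 * mac_register_t with the driver callbacks, MAC address, SDU limits
 * and private properties, then hand it to mac_register().
 */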
4261 static hxge_status_t
4262 hxge_mac_register(p_hxge_t hxgep)
4263 {
4264 	mac_register_t	*macp;
4265 	int		status;
4266 
4267 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_mac_register"));
4268 
4269 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
4270 		return (HXGE_ERROR);
4271 
4272 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
4273 	macp->m_driver = hxgep;
4274 	macp->m_dip = hxgep->dip;
4275 	macp->m_src_addr = hxgep->ouraddr.ether_addr_octet;
4276 
4277 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
4278 	    "hxge_mac_register: ether addr is %x:%x:%x:%x:%x:%x",
4279 	    macp->m_src_addr[0],
4280 	    macp->m_src_addr[1],
4281 	    macp->m_src_addr[2],
4282 	    macp->m_src_addr[3],
4283 	    macp->m_src_addr[4],
4284 	    macp->m_src_addr[5]));
4285 
4286 	macp->m_callbacks = &hxge_m_callbacks;
4287 	macp->m_min_sdu = 0;
4288 	macp->m_max_sdu = hxgep->vmac.maxframesize - MTU_TO_FRAME_SIZE;
4289 	macp->m_margin = VLAN_TAGSZ;
4290 	macp->m_priv_props = hxge_priv_props;
4291 	macp->m_priv_prop_count = HXGE_MAX_PRIV_PROPS;
4292 
4293 	status = mac_register(macp, &hxgep->mach);
4294 	mac_free(macp);
4295 
4296 	if (status != 0) {
4297 		cmn_err(CE_WARN,
4298 		    "hxge_mac_register failed (status %d instance %d)",
4299 		    status, hxgep->instance);
4300 		return (HXGE_ERROR);
4301 	}
4302 
4303 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_mac_register success "
4304 	    "(instance %d)", hxgep->instance));
4305 
4306 	return (HXGE_OK);
4307 }
4308 
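/*
 * Hook this instance onto the global hxge_hw_list, which holds one
 * shared-hardware entry per parent devinfo node.  The first instance
 * to attach creates the entry and its cfg/tcam/vlan locks; subsequent
 * lookups just bump the entry's device count.
 */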
4309 static int
4310 hxge_init_common_dev(p_hxge_t hxgep)
4311 {
4312 	p_hxge_hw_list_t	hw_p;
4313 	dev_info_t		*p_dip;
4314 
4315 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_init_common_dev"));
4316 
4317 	p_dip = hxgep->p_dip;
4318 	MUTEX_ENTER(&hxge_common_lock);
4319 
4320 	/*
4321 	 * Loop through existing per Hydra hardware list.
4322 	 */
4323 	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
4324 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4325 		    "==> hxge_init_common_dev: hw_p $%p parent dip $%p",
4326 		    hw_p, p_dip));
4327 		if (hw_p->parent_devp == p_dip) {
4328 			hxgep->hxge_hw_p = hw_p;
4329 			hw_p->ndevs++;
4330 			hw_p->hxge_p = hxgep;
4331 			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4332 			    "==> hxge_init_common_device: "
4333 			    "hw_p $%p parent dip $%p ndevs %d (found)",
4334 			    hw_p, p_dip, hw_p->ndevs));
4335 			break;
4336 		}
4337 	}
4338 
4339 	if (hw_p == NULL) {
4340 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4341 		    "==> hxge_init_common_dev: parent dip $%p (new)", p_dip));
4342 		hw_p = kmem_zalloc(sizeof (hxge_hw_list_t), KM_SLEEP);
4343 		hw_p->parent_devp = p_dip;
4344 		hw_p->magic = HXGE_MAGIC;
4345 		hxgep->hxge_hw_p = hw_p;
4346 		hw_p->ndevs++;
4347 		hw_p->hxge_p = hxgep;
4348 		hw_p->next = hxge_hw_list;
4349 
4350 		MUTEX_INIT(&hw_p->hxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
4351 		MUTEX_INIT(&hw_p->hxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
4352 		MUTEX_INIT(&hw_p->hxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
4353 
4354 		hxge_hw_list = hw_p;
4355 	}
4356 	MUTEX_EXIT(&hxge_common_lock);
4357 	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4358 	    "==> hxge_init_common_dev (hxge_hw_list) $%p", hxge_hw_list));
4359 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_init_common_dev"));
4360 
4361 	return (HXGE_OK);
4362 }
4363 
4364 static void
4365 hxge_uninit_common_dev(p_hxge_t hxgep)
4366 {
4367 	p_hxge_hw_list_t	hw_p, h_hw_p;
4368 	dev_info_t		*p_dip;
4369 
4370 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_uninit_common_dev"));
4371 	if (hxgep->hxge_hw_p == NULL) {
4372 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4373 		    "<== hxge_uninit_common_dev (no common)"));
4374 		return;
4375 	}
4376 
4377 	MUTEX_ENTER(&hxge_common_lock);
4378 	h_hw_p = hxge_hw_list;
4379 	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
4380 		p_dip = hw_p->parent_devp;
4381 		if (hxgep->hxge_hw_p == hw_p && p_dip == hxgep->p_dip &&
4382 		    hxgep->hxge_hw_p->magic == HXGE_MAGIC &&
4383 		    hw_p->magic == HXGE_MAGIC) {
4384 			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4385 			    "==> hxge_uninit_common_dev: "
4386 			    "hw_p $%p parent dip $%p ndevs %d (found)",
4387 			    hw_p, p_dip, hw_p->ndevs));
4388 
4389 			hxgep->hxge_hw_p = NULL;
4390 			if (hw_p->ndevs) {
4391 				hw_p->ndevs--;
4392 			}
4393 			hw_p->hxge_p = NULL;
4394 			if (!hw_p->ndevs) {
4395 				MUTEX_DESTROY(&hw_p->hxge_vlan_lock);
4396 				MUTEX_DESTROY(&hw_p->hxge_tcam_lock);
4397 				MUTEX_DESTROY(&hw_p->hxge_cfg_lock);
4398 				HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4399 				    "==> hxge_uninit_common_dev: "
4400 				    "hw_p $%p parent dip $%p ndevs %d (last)",
4401 				    hw_p, p_dip, hw_p->ndevs));
4402 
4403 				if (hw_p == hxge_hw_list) {
4404 					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4405 					    "==> hxge_uninit_common_dev:"
4406 					    "remove head "
4407 					    "hw_p $%p parent dip $%p "
4408 					    "ndevs %d (head)",
4409 					    hw_p, p_dip, hw_p->ndevs));
4410 					hxge_hw_list = hw_p->next;
4411 				} else {
4412 					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4413 					    "==> hxge_uninit_common_dev:"
4414 					    "remove middle "
4415 					    "hw_p $%p parent dip $%p "
4416 					    "ndevs %d (middle)",
4417 					    hw_p, p_dip, hw_p->ndevs));
4418 					h_hw_p->next = hw_p->next;
4419 				}
4420 
4421 				KMEM_FREE(hw_p, sizeof (hxge_hw_list_t));
4422 			}
4423 			break;
4424 		} else {
4425 			h_hw_p = hw_p;
4426 		}
4427 	}
4428 
4429 	MUTEX_EXIT(&hxge_common_lock);
4430 	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4431 	    "==> hxge_uninit_common_dev (hxge_hw_list) $%p", hxge_hw_list));
4432 
4433 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_uninit_common_dev"));
4434 }
4435 
4436 #define	HXGE_MSIX_ENTRIES		32
4437 #define	HXGE_MSIX_WAIT_COUNT		10
4438 #define	HXGE_MSIX_PARITY_CHECK_COUNT	30
4439 
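/*
 * timeout(9F) callback: sample the XPCS0 link state from CIP_LINK_STAT,
 * report a change (or a pending forced report) through
 * hxge_link_update(), and rearm the timer.
 */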
4440 static void
4441 hxge_link_poll(void *arg)
4442 {
4443 	p_hxge_t		hxgep = (p_hxge_t)arg;
4444 	hpi_handle_t		handle;
4445 	cip_link_stat_t		link_stat;
4446 	hxge_timeout		*to = &hxgep->timeout;
4447 
4448 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
4449 	HXGE_REG_RD32(handle, CIP_LINK_STAT, &link_stat.value);
4450 
4451 	if (to->report_link_status ||
4452 	    (to->link_status != link_stat.bits.xpcs0_link_up)) {
4453 		to->link_status = link_stat.bits.xpcs0_link_up;
4454 		to->report_link_status = B_FALSE;
4455 
4456 		if (link_stat.bits.xpcs0_link_up) {
4457 			hxge_link_update(hxgep, LINK_STATE_UP);
4458 		} else {
4459 			hxge_link_update(hxgep, LINK_STATE_DOWN);
4460 		}
4461 	}
4462 
4463 	/* Restart the link status timer to check the link status */
4464 	MUTEX_ENTER(&to->lock);
4465 	to->id = timeout(hxge_link_poll, arg, to->ticks);
4466 	MUTEX_EXIT(&to->lock);
4467 }
4468 
4469 static void
4470 hxge_link_update(p_hxge_t hxgep, link_state_t state)
4471 {
4472 	p_hxge_stats_t		statsp = (p_hxge_stats_t)hxgep->statsp;
4473 
4474 	mac_link_update(hxgep->mach, state);
4475 	if (state == LINK_STATE_UP) {
4476 		statsp->mac_stats.link_speed = 10000;
4477 		statsp->mac_stats.link_duplex = 2;
4478 		statsp->mac_stats.link_up = 1;
4479 	} else {
4480 		statsp->mac_stats.link_speed = 0;
4481 		statsp->mac_stats.link_duplex = 0;
4482 		statsp->mac_stats.link_up = 0;
4483 	}
4484 }
4485 
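/*
 * Walk all HXGE_MSIX_ENTRIES slots of the MSI-X table through the
 * INDACC_MEM1 diagnostic interface: write a distinct pattern to each
 * entry, read every entry back, then turn diagnostic mode off.  This
 * leaves the table memory (and, presumably, its parity bits) in a
 * known state before the vectors are used.
 */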
4486 static void
4487 hxge_msix_init(p_hxge_t hxgep)
4488 {
4489 	indacc_mem1_ctrl_t	indacc_mem1_ctrl;
4490 	indacc_mem1_data0_t	data0;
4491 	indacc_mem1_data1_t	data1;
4492 	indacc_mem1_data2_t	data2;
4493 	indacc_mem1_prty_t	prty;
4494 	int			count;
4495 	int			i;
4496 
4497 	for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
4498 		indacc_mem1_ctrl.value = 0;
4499 		HXGE_REG_RD32(hxgep->hpi_handle, INDACC_MEM1_CTRL,
4500 		    &indacc_mem1_ctrl.value);
4501 
4502 		data0.value = 0xffffffff - i;
4503 		HXGE_REG_WR32(hxgep->hpi_handle, INDACC_MEM1_DATA0,
4504 		    data0.value);
4505 		data1.value = 0xffffffff - i - 1;
4506 		HXGE_REG_WR32(hxgep->hpi_handle, INDACC_MEM1_DATA1,
4507 		    data1.value);
4508 		data2.value = 0xffffffff - i - 2;
4509 		HXGE_REG_WR32(hxgep->hpi_handle, INDACC_MEM1_DATA2,
4510 		    data2.value);
4511 
4512 		indacc_mem1_ctrl.value = 0;
4513 		indacc_mem1_ctrl.bits.mem1_addr = i;
4514 		indacc_mem1_ctrl.bits.mem1_sel = 2;
4515 		indacc_mem1_ctrl.bits.mem1_prty_wen = 0;
4516 		indacc_mem1_ctrl.bits.mem1_command = 0;
4517 		indacc_mem1_ctrl.bits.mem1_diagen = 1;
4518 		HXGE_REG_WR32(hxgep->hpi_handle, INDACC_MEM1_CTRL,
4519 		    indacc_mem1_ctrl.value);
4520 
4521 		/* check that operation completed */
4522 		HXGE_REG_RD32(hxgep->hpi_handle, INDACC_MEM1_CTRL,
4523 		    &indacc_mem1_ctrl.value);
4524 
4525 		count = 0;
4526 		while (indacc_mem1_ctrl.bits.mem1_access_status != 1 &&
4527 		    count++ < HXGE_MSIX_WAIT_COUNT) {
4528 			HXGE_DELAY(1);
4529 			indacc_mem1_ctrl.value = 0;
4530 			HXGE_REG_RD32(hxgep->hpi_handle, INDACC_MEM1_CTRL,
4531 			    &indacc_mem1_ctrl.value);
4532 		}
4533 	}
4534 
4535 	for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
4536 		indacc_mem1_ctrl.value = 0;
4537 		indacc_mem1_ctrl.bits.mem1_addr = i;
4538 		indacc_mem1_ctrl.bits.mem1_sel = 2;
4539 		indacc_mem1_ctrl.bits.mem1_prty_wen = 0;
4540 		indacc_mem1_ctrl.bits.mem1_command = 1;
4541 		indacc_mem1_ctrl.bits.mem1_diagen = 1;
4542 
4543 		/* issue read command */
4544 		HXGE_REG_WR32(hxgep->hpi_handle, INDACC_MEM1_CTRL,
4545 		    indacc_mem1_ctrl.value);
4546 
4547 		/* wait for read operation to complete */
4548 		count = 0;
4549 		HXGE_REG_RD32(hxgep->hpi_handle, INDACC_MEM1_CTRL,
4550 		    &indacc_mem1_ctrl.value);
4551 		while (indacc_mem1_ctrl.bits.mem1_access_status != 1 &&
4552 		    count++ < HXGE_MSIX_WAIT_COUNT) {
4553 			HXGE_DELAY(1);
4554 			HXGE_REG_RD32(hxgep->hpi_handle, INDACC_MEM1_CTRL,
4555 			    &indacc_mem1_ctrl.value);
4556 		}
4557 
4560 		data0.value = data1.value = data2.value = prty.value = 0;
4561 		HXGE_REG_RD32(hxgep->hpi_handle, INDACC_MEM1_DATA0,
4562 		    &data0.value);
4563 		HXGE_REG_RD32(hxgep->hpi_handle, INDACC_MEM1_DATA1,
4564 		    &data1.value);
4565 		HXGE_REG_RD32(hxgep->hpi_handle, INDACC_MEM1_DATA2,
4566 		    &data2.value);
4567 		HXGE_REG_RD32(hxgep->hpi_handle, INDACC_MEM1_PRTY,
4568 		    &prty.value);
4569 	}
4570 
4571 	/* Turn off diagnostic mode */
4572 	indacc_mem1_ctrl.value = 0;
4573 	indacc_mem1_ctrl.bits.mem1_addr = 0;
4574 	indacc_mem1_ctrl.bits.mem1_sel = 0;
4575 	indacc_mem1_ctrl.bits.mem1_prty_wen = 0;
4576 	indacc_mem1_ctrl.bits.mem1_command = 0;
4577 	indacc_mem1_ctrl.bits.mem1_diagen = 0;
4578 	HXGE_REG_WR32(hxgep->hpi_handle, INDACC_MEM1_CTRL,
4579 	    indacc_mem1_ctrl.value);
4580 }
4581 
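/*
 * Read each MSI-X table entry back through the INDACC_MEM1 diagnostic
 * interface, recompute the expected parity with gen_32bit_parity(),
 * and compare it against the parity bits the hardware stored.  Every
 * mismatch bumps eic_msix_parerr; the first also posts an FMA ereport.
 */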
4582 void
4583 hxge_check_msix_parity_err(p_hxge_t hxgep)
4584 {
4585 	indacc_mem1_ctrl_t	indacc_mem1_ctrl;
4586 	indacc_mem1_data0_t	data0;
4587 	indacc_mem1_data1_t	data1;
4588 	indacc_mem1_data2_t	data2;
4589 	indacc_mem1_prty_t	prty;
4590 	uint32_t		parity = 0;
4591 	int			count;
4592 	int			i;
4593 
4594 	hpi_handle_t		handle;
4595 	p_hxge_peu_sys_stats_t	statsp;
4596 
4597 	handle = hxgep->hpi_handle;
4598 	statsp = (p_hxge_peu_sys_stats_t)&hxgep->statsp->peu_sys_stats;
4599 
4600 	for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
4601 		indacc_mem1_ctrl.value = 0;
4602 		indacc_mem1_ctrl.bits.mem1_addr = i;
4603 		indacc_mem1_ctrl.bits.mem1_sel = 2;
4604 		indacc_mem1_ctrl.bits.mem1_prty_wen = 0;
4605 		indacc_mem1_ctrl.bits.mem1_command = 1;
4606 		indacc_mem1_ctrl.bits.mem1_diagen = 1;
4607 
4608 		/* issue read command */
4609 		HXGE_REG_WR32(handle, INDACC_MEM1_CTRL, indacc_mem1_ctrl.value);
4610 
4611 		/* wait for read operation to complete */
4612 		count = 0;
4613 		HXGE_REG_RD32(handle, INDACC_MEM1_CTRL,
4614 		    &indacc_mem1_ctrl.value);
4615 		while (indacc_mem1_ctrl.bits.mem1_access_status != 1 &&
4616 		    count++ < HXGE_MSIX_WAIT_COUNT) {
4617 			HXGE_DELAY(1);
4618 			HXGE_REG_RD32(handle, INDACC_MEM1_CTRL,
4619 			    &indacc_mem1_ctrl.value);
4620 		}
4621 
4622 		data0.value = data1.value = data2.value = prty.value = 0;
4623 		HXGE_REG_RD32(handle, INDACC_MEM1_DATA0, &data0.value);
4624 		HXGE_REG_RD32(handle, INDACC_MEM1_DATA1, &data1.value);
4625 		HXGE_REG_RD32(handle, INDACC_MEM1_DATA2, &data2.value);
4626 		HXGE_REG_RD32(handle, INDACC_MEM1_PRTY, &prty.value);
4627 
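		/*
		 * Assemble the 12-bit expected parity: one even-parity
		 * nibble (one bit per byte) for each of the three 32-bit
		 * data words.
		 */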
4628 		parity = gen_32bit_parity(data0.value, B_FALSE) |
4629 		    (gen_32bit_parity(data1.value, B_FALSE) << 4) |
4630 		    (gen_32bit_parity(data2.value, B_FALSE) << 8);
4631 
4632 		if (parity != prty.bits.mem1_parity) {
4633 			statsp->eic_msix_parerr++;
4634 			if (statsp->eic_msix_parerr == 1) {
4635 				HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4636 				    "==> hxge_check_msix_parity_err: "
4637 				    "eic_msix_parerr"));
4638 				HXGE_FM_REPORT_ERROR(hxgep, NULL,
4639 				    HXGE_FM_EREPORT_PEU_ERR);
4640 			}
4641 		}
4642 	}
4643 
4644 	/* Turn off diagnostic mode */
4645 	indacc_mem1_ctrl.value = 0;
4646 	indacc_mem1_ctrl.bits.mem1_addr = 0;
4647 	indacc_mem1_ctrl.bits.mem1_sel = 0;
4648 	indacc_mem1_ctrl.bits.mem1_prty_wen = 0;
4649 	indacc_mem1_ctrl.bits.mem1_command = 0;
4650 	indacc_mem1_ctrl.bits.mem1_diagen = 0;
4651 	HXGE_REG_WR32(handle, INDACC_MEM1_CTRL, indacc_mem1_ctrl.value);
4652 }
4653 
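/*
 * Compute the per-byte parity of a 32-bit word: bit i of the returned
 * low nibble is the parity (odd if odd_parity is set, else even) of
 * byte i of data.
 */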
4654 static uint8_t
4655 gen_32bit_parity(uint32_t data, boolean_t odd_parity)
4656 {
4657 	uint8_t		parity = 0;
4658 	uint8_t		data_byte = 0;
4659 	uint8_t		parity_bit = 0;
4660 	uint32_t	i = 0, j = 0;
4661 
4662 	for (i = 0; i < 4; i++) {
4663 		data_byte = (data >> (i * 8)) & 0xffULL;
4664 		parity_bit = odd_parity ? 1 : 0;
4665 		for (j = 0; j < 8; j++) {
4666 			parity_bit ^= (data_byte >> j) & 0x1ULL;
4667 		}
4668 		parity |= (parity_bit << i);
4669 	}
4670 
4671 	return (parity);
4672 }
4673