xref: /titanic_41/usr/src/uts/common/io/hxge/hxge_main.c (revision a6d42e7d71324c5193c3b94d57d96ba2925d52e1)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * SunOS MT STREAMS Hydra 10Gb Ethernet Device Driver.
28  */
29 #include <hxge_impl.h>
30 #include <hxge_pfc.h>
31 
32 /*
33  * PSARC/2007/453 MSI-X interrupt limit override
34  * (This PSARC case is limited to MSI-X vectors
35  *  and SPARC platforms only).
36  */
37 #if defined(_BIG_ENDIAN)
38 uint32_t hxge_msi_enable = 2;
39 #else
40 uint32_t hxge_msi_enable = 1;
41 #endif
42 
43 /*
44  * Globals: tunable parameters (/etc/system or adb)
45  */
47 uint32_t hxge_rbr_size = HXGE_RBR_RBB_DEFAULT;
48 uint32_t hxge_rbr_spare_size = 0;
49 uint32_t hxge_rcr_size = HXGE_RCR_DEFAULT;
50 uint32_t hxge_tx_ring_size = HXGE_TX_RING_DEFAULT;
51 uint32_t hxge_bcopy_thresh = TX_BCOPY_MAX;
52 uint32_t hxge_dvma_thresh = TX_FASTDVMA_MIN;
53 uint32_t hxge_dma_stream_thresh = TX_STREAM_MIN;
54 uint32_t hxge_jumbo_frame_size = MAX_FRAME_SIZE;
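/*
 * These can be overridden at boot via /etc/system using the standard
 * "set module:variable = value" syntax; e.g. (value illustrative only):
 *	set hxge:hxge_rbr_size = 2048
 */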
55 
56 static hxge_os_mutex_t hxgedebuglock;
57 static int hxge_debug_init = 0;
58 
59 /*
60  * Debugging flags:
61  *		hxge_no_tx_lb: nonzero disables transmit load balancing
62  *		hxge_tx_lb_policy: 0 - TCP/UDP port (default)
63  *				   1 - From the Stack
64  *				   2 - Destination IP Address
65  */
66 uint32_t hxge_no_tx_lb = 0;
67 uint32_t hxge_tx_lb_policy = HXGE_TX_LB_TCPUDP;
68 
69 /*
70  * Tunable that caps the number of packets processed per Rx interrupt,
71  * reducing the amount of time spent in the ISR doing Rx processing.
72  */
73 uint32_t hxge_max_rx_pkts = 1024;
74 
75 /*
76  * Tunables to manage the receive buffer blocks.
77  *
78  * hxge_rx_threshold_hi: copy all buffers.
79  * hxge_rx_buf_size_type: receive buffer block size type.
80  * hxge_rx_threshold_lo: copy only up to tunable block size type.
81  */
82 hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_6;
83 hxge_rxbuf_type_t hxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
84 hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_3;
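/*
 * A sketch of the policy implied by the comment above (the authoritative
 * checks live in the receive path, hxge_rxdma.c): the thresholds select
 * between bcopy'ing received data into fresh mblks and loaning the DMA
 * buffer itself up to the stack, with _hi forcing copies and _lo
 * limiting copies to the configured buffer size type.
 */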
85 
86 rtrace_t hpi_rtracebuf;
87 
88 /*
89  * Function Prototypes
90  */
91 static int hxge_attach(dev_info_t *, ddi_attach_cmd_t);
92 static int hxge_detach(dev_info_t *, ddi_detach_cmd_t);
93 static void hxge_unattach(p_hxge_t);
94 
95 static hxge_status_t hxge_setup_system_dma_pages(p_hxge_t);
96 
97 static hxge_status_t hxge_setup_mutexes(p_hxge_t);
98 static void hxge_destroy_mutexes(p_hxge_t);
99 
100 static hxge_status_t hxge_map_regs(p_hxge_t hxgep);
101 static void hxge_unmap_regs(p_hxge_t hxgep);
102 
103 hxge_status_t hxge_add_intrs(p_hxge_t hxgep);
104 static hxge_status_t hxge_add_soft_intrs(p_hxge_t hxgep);
105 static void hxge_remove_intrs(p_hxge_t hxgep);
106 static void hxge_remove_soft_intrs(p_hxge_t hxgep);
107 static hxge_status_t hxge_add_intrs_adv(p_hxge_t hxgep);
108 static hxge_status_t hxge_add_intrs_adv_type(p_hxge_t, uint32_t);
109 static hxge_status_t hxge_add_intrs_adv_type_fix(p_hxge_t, uint32_t);
110 void hxge_intrs_enable(p_hxge_t hxgep);
111 static void hxge_intrs_disable(p_hxge_t hxgep);
112 static void hxge_suspend(p_hxge_t);
113 static hxge_status_t hxge_resume(p_hxge_t);
114 hxge_status_t hxge_setup_dev(p_hxge_t);
115 static void hxge_destroy_dev(p_hxge_t);
116 hxge_status_t hxge_alloc_mem_pool(p_hxge_t);
117 static void hxge_free_mem_pool(p_hxge_t);
118 static hxge_status_t hxge_alloc_rx_mem_pool(p_hxge_t);
119 static void hxge_free_rx_mem_pool(p_hxge_t);
120 static hxge_status_t hxge_alloc_tx_mem_pool(p_hxge_t);
121 static void hxge_free_tx_mem_pool(p_hxge_t);
122 static hxge_status_t hxge_dma_mem_alloc(p_hxge_t, dma_method_t,
123     struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
124     p_hxge_dma_common_t);
125 static void hxge_dma_mem_free(p_hxge_dma_common_t);
126 static hxge_status_t hxge_alloc_rx_buf_dma(p_hxge_t, uint16_t,
127     p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
128 static void hxge_free_rx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
129 static hxge_status_t hxge_alloc_rx_cntl_dma(p_hxge_t, uint16_t,
130     p_hxge_dma_common_t *, struct ddi_dma_attr *, size_t);
131 static void hxge_free_rx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
132 static hxge_status_t hxge_alloc_tx_buf_dma(p_hxge_t, uint16_t,
133     p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
134 static void hxge_free_tx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
135 static hxge_status_t hxge_alloc_tx_cntl_dma(p_hxge_t, uint16_t,
136     p_hxge_dma_common_t *, size_t);
137 static void hxge_free_tx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
138 static int hxge_init_common_dev(p_hxge_t);
139 static void hxge_uninit_common_dev(p_hxge_t);
140 
141 /*
142  * The next declarations are for the GLDv3 interface.
143  */
144 static int hxge_m_start(void *);
145 static void hxge_m_stop(void *);
146 static int hxge_m_unicst(void *, const uint8_t *);
147 static int hxge_m_multicst(void *, boolean_t, const uint8_t *);
148 static int hxge_m_promisc(void *, boolean_t);
149 static void hxge_m_ioctl(void *, queue_t *, mblk_t *);
150 static void hxge_m_resources(void *);
151 static hxge_status_t hxge_mac_register(p_hxge_t hxgep);
152 
153 static int hxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
154 static int hxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);
155 static int hxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
156 static int hxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);
157 static boolean_t hxge_m_getcapab(void *, mac_capab_t, void *);
158 static boolean_t hxge_param_locked(mac_prop_id_t pr_num);
159 static int hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
160     uint_t pr_valsize, const void *pr_val);
161 static int hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
162     uint_t pr_flags, uint_t pr_valsize, void *pr_val);
163 static int hxge_get_def_val(hxge_t *hxgep, mac_prop_id_t pr_num,
164     uint_t pr_valsize, void *pr_val);
165 static int hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name,
166     uint_t pr_valsize, const void *pr_val);
167 static int hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name,
168     uint_t pr_flags, uint_t pr_valsize, void *pr_val);
169 static void hxge_link_poll(void *arg);
170 static void hxge_link_update(p_hxge_t hxge, link_state_t state);
171 
172 mac_priv_prop_t hxge_priv_props[] = {
173 	{"_rxdma_intr_time", MAC_PROP_PERM_RW},
174 	{"_rxdma_intr_pkts", MAC_PROP_PERM_RW},
175 	{"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW},
176 	{"_class_opt_ipv4_udp", MAC_PROP_PERM_RW},
177 	{"_class_opt_ipv4_ah", MAC_PROP_PERM_RW},
178 	{"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW},
179 	{"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW},
180 	{"_class_opt_ipv6_udp", MAC_PROP_PERM_RW},
181 	{"_class_opt_ipv6_ah", MAC_PROP_PERM_RW},
182 	{"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW}
183 };
184 
185 #define	HXGE_MAX_PRIV_PROPS	\
186 	(sizeof (hxge_priv_props)/sizeof (mac_priv_prop_t))
187 
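/* HXGE_MAGIC is the ASCII string "NXGE" (0x4E 0x58 0x47 0x45). */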
188 #define	HXGE_MAGIC	0x4E584745UL
189 #define	MAX_DUMP_SZ 256
190 
191 #define	HXGE_M_CALLBACK_FLAGS	\
192 	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)
193 
194 extern mblk_t *hxge_m_tx(void *arg, mblk_t *mp);
195 extern hxge_status_t hxge_pfc_set_default_mac_addr(p_hxge_t hxgep);
196 
197 static mac_callbacks_t hxge_m_callbacks = {
198 	HXGE_M_CALLBACK_FLAGS,
199 	hxge_m_stat,
200 	hxge_m_start,
201 	hxge_m_stop,
202 	hxge_m_promisc,
203 	hxge_m_multicst,
204 	hxge_m_unicst,
205 	hxge_m_tx,
206 	hxge_m_resources,
207 	hxge_m_ioctl,
208 	hxge_m_getcapab,
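	/*
	 * The two NULL entries below are assumed to be the optional
	 * mc_open/mc_close callbacks of this mac_callbacks_t revision,
	 * which hxge does not provide.
	 */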
209 	NULL,
210 	NULL,
211 	hxge_m_setprop,
212 	hxge_m_getprop
213 };
214 
215 /* Enable debug messages as necessary. */
216 uint64_t hxge_debug_level = 0;
217 
218 /*
219  * This list contains the instance structures for the Hydra
220  * devices present in the system. The lock exists to guarantee
221  * mutually exclusive access to the list.
222  */
223 void *hxge_list = NULL;
224 void *hxge_hw_list = NULL;
225 hxge_os_mutex_t hxge_common_lock;
226 
227 extern uint64_t hpi_debug_level;
228 
229 extern hxge_status_t hxge_ldgv_init();
230 extern hxge_status_t hxge_ldgv_uninit();
231 extern hxge_status_t hxge_intr_ldgv_init();
232 extern void hxge_fm_init(p_hxge_t hxgep, ddi_device_acc_attr_t *reg_attr,
233     ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr);
234 extern void hxge_fm_fini(p_hxge_t hxgep);
235 
236 /*
237  * Count of the buffers in use by Hydra instances that have been
238  * loaned up to the upper layers.
239  */
240 uint32_t hxge_mblks_pending = 0;
241 
242 /*
243  * Device register access attributes for PIO.
244  */
245 static ddi_device_acc_attr_t hxge_dev_reg_acc_attr = {
246 	DDI_DEVICE_ATTR_V0,
247 	DDI_STRUCTURE_LE_ACC,
248 	DDI_STRICTORDER_ACC,
249 };
250 
251 /*
252  * Device descriptor access attributes for DMA.
253  */
254 static ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr = {
255 	DDI_DEVICE_ATTR_V0,
256 	DDI_STRUCTURE_LE_ACC,
257 	DDI_STRICTORDER_ACC
258 };
259 
260 /*
261  * Device buffer access attributes for DMA.
262  */
263 static ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr = {
264 	DDI_DEVICE_ATTR_V0,
265 	DDI_STRUCTURE_BE_ACC,
266 	DDI_STRICTORDER_ACC
267 };
268 
269 ddi_dma_attr_t hxge_rx_rcr_desc_dma_attr = {
270 	DMA_ATTR_V0,		/* version number. */
271 	0,			/* low address */
272 	0xffffffffffffffff,	/* high address */
273 	0xffffffffffffffff,	/* address counter max */
274 	0x80000,		/* alignment */
275 	0xfc00fc,		/* dlim_burstsizes */
276 	0x1,			/* minimum transfer size */
277 	0xffffffffffffffff,	/* maximum transfer size */
278 	0xffffffffffffffff,	/* maximum segment size */
279 	1,			/* scatter/gather list length */
280 	(unsigned int)1,	/* granularity */
281 	0			/* attribute flags */
282 };
283 
284 ddi_dma_attr_t hxge_tx_desc_dma_attr = {
285 	DMA_ATTR_V0,		/* version number. */
286 	0,			/* low address */
287 	0xffffffffffffffff,	/* high address */
288 	0xffffffffffffffff,	/* address counter max */
289 	0x100000,		/* alignment */
290 	0xfc00fc,		/* dlim_burstsizes */
291 	0x1,			/* minimum transfer size */
292 	0xffffffffffffffff,	/* maximum transfer size */
293 	0xffffffffffffffff,	/* maximum segment size */
294 	1,			/* scatter/gather list length */
295 	(unsigned int)1,	/* granularity */
296 	0			/* attribute flags */
297 };
298 
299 ddi_dma_attr_t hxge_rx_rbr_desc_dma_attr = {
300 	DMA_ATTR_V0,		/* version number. */
301 	0,			/* low address */
302 	0xffffffffffffffff,	/* high address */
303 	0xffffffffffffffff,	/* address counter max */
304 	0x40000,		/* alignment */
305 	0xfc00fc,		/* dlim_burstsizes */
306 	0x1,			/* minimum transfer size */
307 	0xffffffffffffffff,	/* maximum transfer size */
308 	0xffffffffffffffff,	/* maximum segment size */
309 	1,			/* scatter/gather list length */
310 	(unsigned int)1,	/* granularity */
311 	0			/* attribute flags */
312 };
313 
314 ddi_dma_attr_t hxge_rx_mbox_dma_attr = {
315 	DMA_ATTR_V0,		/* version number. */
316 	0,			/* low address */
317 	0xffffffffffffffff,	/* high address */
318 	0xffffffffffffffff,	/* address counter max */
319 #if defined(_BIG_ENDIAN)
320 	0x2000,			/* alignment */
321 #else
322 	0x1000,			/* alignment */
323 #endif
324 	0xfc00fc,		/* dlim_burstsizes */
325 	0x1,			/* minimum transfer size */
326 	0xffffffffffffffff,	/* maximum transfer size */
327 	0xffffffffffffffff,	/* maximum segment size */
328 	5,			/* scatter/gather list length */
329 	(unsigned int)1,	/* granularity */
330 	0			/* attribute flags */
331 };
332 
333 ddi_dma_attr_t hxge_tx_dma_attr = {
334 	DMA_ATTR_V0,		/* version number. */
335 	0,			/* low address */
336 	0xffffffffffffffff,	/* high address */
337 	0xffffffffffffffff,	/* address counter max */
338 #if defined(_BIG_ENDIAN)
339 	0x2000,			/* alignment */
340 #else
341 	0x1000,			/* alignment */
342 #endif
343 	0xfc00fc,		/* dlim_burstsizes */
344 	0x1,			/* minimum transfer size */
345 	0xffffffffffffffff,	/* maximum transfer size */
346 	0xffffffffffffffff,	/* maximum segment size */
347 	5,			/* scatter/gather list length */
348 	(unsigned int)1,	/* granularity */
349 	0			/* attribute flags */
350 };
351 
352 ddi_dma_attr_t hxge_rx_dma_attr = {
353 	DMA_ATTR_V0,		/* version number. */
354 	0,			/* low address */
355 	0xffffffffffffffff,	/* high address */
356 	0xffffffffffffffff,	/* address counter max */
357 	0x10000,		/* alignment */
358 	0xfc00fc,		/* dlim_burstsizes */
359 	0x1,			/* minimum transfer size */
360 	0xffffffffffffffff,	/* maximum transfer size */
361 	0xffffffffffffffff,	/* maximum segment size */
362 	1,			/* scatter/gather list length */
363 	(unsigned int)1,	/* granularity */
364 	DDI_DMA_RELAXED_ORDERING /* attribute flags */
365 };
366 
367 ddi_dma_lim_t hxge_dma_limits = {
368 	(uint_t)0,		/* dlim_addr_lo */
369 	(uint_t)0xffffffff,	/* dlim_addr_hi */
370 	(uint_t)0xffffffff,	/* dlim_cntr_max */
371 	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
372 	0x1,			/* dlim_minxfer */
373 	1024			/* dlim_speed */
374 };
375 
376 dma_method_t hxge_force_dma = DVMA;
377 
378 /*
379  * DMA chunk sizes.
380  *
381  * Try to allocate the largest possible chunk size
382  * so that fewer DMA chunks need to be managed.
383  */
384 size_t alloc_sizes[] = {
385     0x1000, 0x2000, 0x4000, 0x8000,
386     0x10000, 0x20000, 0x40000, 0x80000,
387     0x100000, 0x200000, 0x400000, 0x800000, 0x1000000
388 };
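/*
 * hxge_alloc_rx_buf_dma() (below) scans this table for the largest entry
 * that does not exceed the requested allocation, then falls back to
 * smaller entries whenever a DMA allocation fails.
 */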
389 
390 /*
391  * hxge_attach: attach (DDI_ATTACH) or resume (DDI_RESUME/DDI_PM_RESUME)
 * an instance of the Hydra device.
392  */
393 static int
394 hxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
395 {
396 	p_hxge_t	hxgep = NULL;
397 	int		instance;
398 	int		status = DDI_SUCCESS;
399 
400 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_attach"));
401 
402 	/*
403 	 * Get the device instance, since we'll need to set up or retrieve a soft
404 	 * state for this instance.
405 	 */
406 	instance = ddi_get_instance(dip);
407 
408 	switch (cmd) {
409 	case DDI_ATTACH:
410 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_ATTACH"));
411 		break;
412 
413 	case DDI_RESUME:
414 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_RESUME"));
415 		hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
416 		if (hxgep == NULL) {
417 			status = DDI_FAILURE;
418 			break;
419 		}
420 		if (hxgep->dip != dip) {
421 			status = DDI_FAILURE;
422 			break;
423 		}
424 		if (hxgep->suspended == DDI_PM_SUSPEND) {
425 			status = ddi_dev_is_needed(hxgep->dip, 0, 1);
426 		} else {
427 			(void) hxge_resume(hxgep);
428 		}
429 		goto hxge_attach_exit;
430 
431 	case DDI_PM_RESUME:
432 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_RESUME"));
433 		hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
434 		if (hxgep == NULL) {
435 			status = DDI_FAILURE;
436 			break;
437 		}
438 		if (hxgep->dip != dip) {
439 			status = DDI_FAILURE;
440 			break;
441 		}
442 		(void) hxge_resume(hxgep);
443 		goto hxge_attach_exit;
444 
445 	default:
446 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing unknown"));
447 		status = DDI_FAILURE;
448 		goto hxge_attach_exit;
449 	}
450 
451 	if (ddi_soft_state_zalloc(hxge_list, instance) == DDI_FAILURE) {
452 		status = DDI_FAILURE;
453 		HXGE_ERROR_MSG((hxgep, DDI_CTL,
454 		    "ddi_soft_state_zalloc failed"));
455 		goto hxge_attach_exit;
456 	}
457 
458 	hxgep = ddi_get_soft_state(hxge_list, instance);
459 	if (hxgep == NULL) {
460 		status = HXGE_ERROR;
461 		HXGE_ERROR_MSG((hxgep, DDI_CTL,
462 		    "ddi_get_soft_state failed"));
463 		goto hxge_attach_fail2;
464 	}
465 
466 	hxgep->drv_state = 0;
467 	hxgep->dip = dip;
468 	hxgep->instance = instance;
469 	hxgep->p_dip = ddi_get_parent(dip);
470 	hxgep->hxge_debug_level = hxge_debug_level;
471 	hpi_debug_level = hxge_debug_level;
472 
473 	hxge_fm_init(hxgep, &hxge_dev_reg_acc_attr, &hxge_dev_desc_dma_acc_attr,
474 	    &hxge_rx_dma_attr);
475 
476 	status = hxge_map_regs(hxgep);
477 	if (status != HXGE_OK) {
478 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_map_regs failed"));
479 		goto hxge_attach_fail3;
480 	}
481 
482 	status = hxge_init_common_dev(hxgep);
483 	if (status != HXGE_OK) {
484 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
485 		    "hxge_init_common_dev failed"));
486 		goto hxge_attach_fail4;
487 	}
488 
489 	/*
490 	 * Set up the ndd parameters for this instance.
491 	 */
492 	hxge_init_param(hxgep);
493 
494 	/*
495 	 * Set up the register tracing buffer.
496 	 */
497 	hpi_rtrace_buf_init((rtrace_t *)&hpi_rtracebuf);
498 
499 	/* init stats ptr */
500 	hxge_init_statsp(hxgep);
501 
502 	status = hxge_setup_mutexes(hxgep);
503 	if (status != HXGE_OK) {
504 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set mutex failed"));
505 		goto hxge_attach_fail;
506 	}
507 
508 	status = hxge_get_config_properties(hxgep);
509 	if (status != HXGE_OK) {
510 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "get_hw create failed"));
511 		goto hxge_attach_fail;
512 	}
513 
514 	/*
515 	 * Set up the kstats for the driver.
516 	 */
517 	hxge_setup_kstats(hxgep);
518 	hxge_setup_param(hxgep);
519 
520 	status = hxge_setup_system_dma_pages(hxgep);
521 	if (status != HXGE_OK) {
522 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "set dma page failed"));
523 		goto hxge_attach_fail;
524 	}
525 
526 	hxge_hw_id_init(hxgep);
527 	hxge_hw_init_niu_common(hxgep);
528 
529 	status = hxge_setup_dev(hxgep);
530 	if (status != DDI_SUCCESS) {
531 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set dev failed"));
532 		goto hxge_attach_fail;
533 	}
534 
535 	status = hxge_add_intrs(hxgep);
536 	if (status != DDI_SUCCESS) {
537 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "add_intr failed"));
538 		goto hxge_attach_fail;
539 	}
540 
541 	status = hxge_add_soft_intrs(hxgep);
542 	if (status != DDI_SUCCESS) {
543 		HXGE_DEBUG_MSG((hxgep, HXGE_ERR_CTL, "add_soft_intr failed"));
544 		goto hxge_attach_fail;
545 	}
546 
547 	/*
548 	 * Enable interrupts.
549 	 */
550 	hxge_intrs_enable(hxgep);
551 
552 	/*
553 	 * Clear the PEU parity error mask (unmasking all parity errors)
554 	 * now that ddi_intr_enable() has been called.
555 	 */
556 	HXGE_REG_WR32(hxgep->hpi_handle, PEU_INTR_MASK, 0x0);
557 
558 	if ((status = hxge_mac_register(hxgep)) != HXGE_OK) {
559 		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
560 		    "unable to register to mac layer (%d)", status));
561 		goto hxge_attach_fail;
562 	}
563 	mac_link_update(hxgep->mach, LINK_STATE_UNKNOWN);
564 
565 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "registered to mac (instance %d)",
566 	    instance));
567 
568 	goto hxge_attach_exit;
569 
570 hxge_attach_fail:
571 	hxge_unattach(hxgep);
572 	goto hxge_attach_fail1;
573 
574 hxge_attach_fail5:
575 	/*
576 	 * Tear down the ndd parameters setup.
577 	 */
578 	hxge_destroy_param(hxgep);
579 
580 	/*
581 	 * Tear down the kstat setup.
582 	 */
583 	hxge_destroy_kstats(hxgep);
584 
585 hxge_attach_fail4:
586 	if (hxgep->hxge_hw_p) {
587 		hxge_uninit_common_dev(hxgep);
588 		hxgep->hxge_hw_p = NULL;
589 	}
590 hxge_attach_fail3:
591 	/*
592 	 * Unmap the register setup.
593 	 */
594 	hxge_unmap_regs(hxgep);
595 
596 	hxge_fm_fini(hxgep);
597 
598 hxge_attach_fail2:
599 	ddi_soft_state_free(hxge_list, hxgep->instance);
600 
601 hxge_attach_fail1:
602 	if (status != HXGE_OK)
603 		status = (HXGE_ERROR | HXGE_DDI_FAILED);
604 	hxgep = NULL;
605 
606 hxge_attach_exit:
607 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_attach status = 0x%08x",
608 	    status));
609 
610 	return (status);
611 }
612 
613 static int
614 hxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
615 {
616 	int		status = DDI_SUCCESS;
617 	int		instance;
618 	p_hxge_t	hxgep = NULL;
619 
620 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_detach"));
621 	instance = ddi_get_instance(dip);
622 	hxgep = ddi_get_soft_state(hxge_list, instance);
623 	if (hxgep == NULL) {
624 		status = DDI_FAILURE;
625 		goto hxge_detach_exit;
626 	}
627 
628 	switch (cmd) {
629 	case DDI_DETACH:
630 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_DETACH"));
631 		break;
632 
633 	case DDI_PM_SUSPEND:
634 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
635 		hxgep->suspended = DDI_PM_SUSPEND;
636 		hxge_suspend(hxgep);
637 		break;
638 
639 	case DDI_SUSPEND:
640 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_SUSPEND"));
641 		if (hxgep->suspended != DDI_PM_SUSPEND) {
642 			hxgep->suspended = DDI_SUSPEND;
643 			hxge_suspend(hxgep);
644 		}
645 		break;
646 
647 	default:
648 		status = DDI_FAILURE;
649 		break;
650 	}
651 
652 	if (cmd != DDI_DETACH)
653 		goto hxge_detach_exit;
654 
655 	/*
656 	 * Stop the xcvr polling.
657 	 */
658 	hxgep->suspended = cmd;
659 
660 	if (hxgep->mach && (status = mac_unregister(hxgep->mach)) != 0) {
661 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
662 		    "<== hxge_detach status = 0x%08X", status));
663 		return (DDI_FAILURE);
664 	}
665 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
666 	    "<== hxge_detach (mac_unregister) status = 0x%08X", status));
667 
668 	hxge_unattach(hxgep);
669 	hxgep = NULL;
670 
671 hxge_detach_exit:
672 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_detach status = 0x%08X",
673 	    status));
674 
675 	return (status);
676 }
677 
678 static void
679 hxge_unattach(p_hxge_t hxgep)
680 {
681 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unattach"));
682 
683 	if (hxgep == NULL || hxgep->dev_regs == NULL) {
684 		return;
685 	}
686 
687 	if (hxgep->hxge_hw_p) {
688 		hxge_uninit_common_dev(hxgep);
689 		hxgep->hxge_hw_p = NULL;
690 	}
691 
692 	if (hxgep->hxge_timerid) {
693 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
694 		hxgep->hxge_timerid = 0;
695 	}
696 
697 	/* Stop any further interrupts. */
698 	hxge_remove_intrs(hxgep);
699 
700 	/* Remove soft interrupts */
701 	hxge_remove_soft_intrs(hxgep);
702 
703 	/* Stop the device and free resources. */
704 	hxge_destroy_dev(hxgep);
705 
706 	/* Tear down the ndd parameters setup. */
707 	hxge_destroy_param(hxgep);
708 
709 	/* Tear down the kstat setup. */
710 	hxge_destroy_kstats(hxgep);
711 
712 	/*
713 	 * Remove the list of ndd parameters which were setup during attach.
714 	 */
715 	if (hxgep->dip) {
716 		HXGE_DEBUG_MSG((hxgep, OBP_CTL,
717 		    " hxge_unattach: remove all properties"));
718 		(void) ddi_prop_remove_all(hxgep->dip);
719 	}
720 
721 	/*
722 	 * Reset RDC, TDC, PFC, and VMAC blocks from PEU to clear any
723 	 * previous state before unmapping the registers.
724 	 */
725 	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, 0x0000001E);
726 	HXGE_DELAY(1000);
727 
728 	/*
729 	 * Unmap the register setup.
730 	 */
731 	hxge_unmap_regs(hxgep);
732 
733 	hxge_fm_fini(hxgep);
734 
735 	/* Destroy all mutexes.  */
736 	hxge_destroy_mutexes(hxgep);
737 
738 	/*
739 	 * Free the soft state data structures allocated with this instance.
740 	 */
741 	ddi_soft_state_free(hxge_list, hxgep->instance);
742 
743 	HXGE_DEBUG_MSG((NULL, DDI_CTL, "<== hxge_unattach"));
744 }
745 
746 static hxge_status_t
747 hxge_map_regs(p_hxge_t hxgep)
748 {
749 	int		ddi_status = DDI_SUCCESS;
750 	p_dev_regs_t	dev_regs;
751 
752 #ifdef	HXGE_DEBUG
753 	char		*sysname;
754 #endif
755 
756 	off_t		regsize;
757 	hxge_status_t	status = HXGE_OK;
758 	int		nregs;
759 
760 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_map_regs"));
761 
762 	if (ddi_dev_nregs(hxgep->dip, &nregs) != DDI_SUCCESS)
763 		return (HXGE_ERROR);
764 
765 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_regs: nregs: %d", nregs));
766 
767 	hxgep->dev_regs = NULL;
768 	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
769 	dev_regs->hxge_regh = NULL;
770 	dev_regs->hxge_pciregh = NULL;
771 	dev_regs->hxge_msix_regh = NULL;
772 
773 	(void) ddi_dev_regsize(hxgep->dip, 0, &regsize);
774 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
775 	    "hxge_map_regs: pci config size 0x%x", regsize));
776 
777 	ddi_status = ddi_regs_map_setup(hxgep->dip, 0,
778 	    (caddr_t *)&(dev_regs->hxge_pciregp), 0, 0,
779 	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_pciregh);
780 	if (ddi_status != DDI_SUCCESS) {
781 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
782 		    "ddi_map_regs, hxge bus config regs failed"));
783 		goto hxge_map_regs_fail0;
784 	}
785 
786 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
787 	    "hxge_map_reg: PCI config addr 0x%0llx handle 0x%0llx",
788 	    dev_regs->hxge_pciregp,
789 	    dev_regs->hxge_pciregh));
790 
791 	(void) ddi_dev_regsize(hxgep->dip, 1, &regsize);
792 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
793 	    "hxge_map_regs: pio size 0x%x", regsize));
794 
795 	/* set up the device mapped register */
796 	ddi_status = ddi_regs_map_setup(hxgep->dip, 1,
797 	    (caddr_t *)&(dev_regs->hxge_regp), 0, 0,
798 	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_regh);
799 
800 	if (ddi_status != DDI_SUCCESS) {
801 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
802 		    "ddi_map_regs for Hydra global reg failed"));
803 		goto hxge_map_regs_fail1;
804 	}
805 
806 	/* set up the msi/msi-x mapped register */
807 	(void) ddi_dev_regsize(hxgep->dip, 2, &regsize);
808 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
809 	    "hxge_map_regs: msix size 0x%x", regsize));
810 
811 	ddi_status = ddi_regs_map_setup(hxgep->dip, 2,
812 	    (caddr_t *)&(dev_regs->hxge_msix_regp), 0, 0,
813 	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_msix_regh);
814 
815 	if (ddi_status != DDI_SUCCESS) {
816 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
817 		    "ddi_map_regs for msi reg failed"));
818 		goto hxge_map_regs_fail2;
819 	}
820 
821 	hxgep->dev_regs = dev_regs;
822 
823 	HPI_PCI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_pciregh);
824 	HPI_PCI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_pciregp);
825 	HPI_MSI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_msix_regh);
826 	HPI_MSI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_msix_regp);
827 
828 	HPI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
829 	HPI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
830 
831 	HPI_REG_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
832 	HPI_REG_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
833 
834 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_reg: hardware addr 0x%0llx "
835 	    " handle 0x%0llx", dev_regs->hxge_regp, dev_regs->hxge_regh));
836 
837 	goto hxge_map_regs_exit;
838 
839 hxge_map_regs_fail3:
840 	if (dev_regs->hxge_msix_regh) {
841 		ddi_regs_map_free(&dev_regs->hxge_msix_regh);
842 	}
843 
844 hxge_map_regs_fail2:
845 	if (dev_regs->hxge_regh) {
846 		ddi_regs_map_free(&dev_regs->hxge_regh);
847 	}
848 
849 hxge_map_regs_fail1:
850 	if (dev_regs->hxge_pciregh) {
851 		ddi_regs_map_free(&dev_regs->hxge_pciregh);
852 	}
853 
854 hxge_map_regs_fail0:
855 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "Freeing register set memory"));
856 	kmem_free(dev_regs, sizeof (dev_regs_t));
857 
858 hxge_map_regs_exit:
859 	if (ddi_status != DDI_SUCCESS)
860 		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
861 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_map_regs"));
862 	return (status);
863 }
864 
865 static void
866 hxge_unmap_regs(p_hxge_t hxgep)
867 {
868 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unmap_regs"));
869 	if (hxgep->dev_regs) {
870 		if (hxgep->dev_regs->hxge_pciregh) {
871 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
872 			    "==> hxge_unmap_regs: bus"));
873 			ddi_regs_map_free(&hxgep->dev_regs->hxge_pciregh);
874 			hxgep->dev_regs->hxge_pciregh = NULL;
875 		}
876 
877 		if (hxgep->dev_regs->hxge_regh) {
878 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
879 			    "==> hxge_unmap_regs: device registers"));
880 			ddi_regs_map_free(&hxgep->dev_regs->hxge_regh);
881 			hxgep->dev_regs->hxge_regh = NULL;
882 		}
883 
884 		if (hxgep->dev_regs->hxge_msix_regh) {
885 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
886 			    "==> hxge_unmap_regs: device interrupts"));
887 			ddi_regs_map_free(&hxgep->dev_regs->hxge_msix_regh);
888 			hxgep->dev_regs->hxge_msix_regh = NULL;
889 		}
890 		kmem_free(hxgep->dev_regs, sizeof (dev_regs_t));
891 		hxgep->dev_regs = NULL;
892 	}
893 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_unmap_regs"));
894 }
895 
896 static hxge_status_t
897 hxge_setup_mutexes(p_hxge_t hxgep)
898 {
899 	int		ddi_status = DDI_SUCCESS;
900 	hxge_status_t	status = HXGE_OK;
901 
902 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_mutexes"));
903 
904 	/*
905 	 * Get the interrupt cookie so the mutexes can be initialized.
906 	 */
907 	ddi_status = ddi_get_iblock_cookie(hxgep->dip, 0,
908 	    &hxgep->interrupt_cookie);
909 
910 	if (ddi_status != DDI_SUCCESS) {
911 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
912 		    "<== hxge_setup_mutexes: failed 0x%x", ddi_status));
913 		goto hxge_setup_mutexes_exit;
914 	}
915 
916 	/*
917 	 * Initialize the mutexes for this device.
918 	 */
919 	MUTEX_INIT(hxgep->genlock, NULL,
920 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
921 	MUTEX_INIT(&hxgep->ouraddr_lock, NULL,
922 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
923 	RW_INIT(&hxgep->filter_lock, NULL,
924 	    RW_DRIVER, (void *) hxgep->interrupt_cookie);
925 	MUTEX_INIT(&hxgep->pio_lock, NULL,
926 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
927 	MUTEX_INIT(&hxgep->timeout.lock, NULL,
928 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
929 
930 hxge_setup_mutexes_exit:
931 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
932 	    "<== hxge_setup_mutexes status = %x", status));
933 
934 	if (ddi_status != DDI_SUCCESS)
935 		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
936 
937 	return (status);
938 }
939 
940 static void
941 hxge_destroy_mutexes(p_hxge_t hxgep)
942 {
943 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_mutexes"));
944 	RW_DESTROY(&hxgep->filter_lock);
945 	MUTEX_DESTROY(&hxgep->ouraddr_lock);
946 	MUTEX_DESTROY(hxgep->genlock);
947 	MUTEX_DESTROY(&hxgep->pio_lock);
948 	MUTEX_DESTROY(&hxgep->timeout.lock);
949 
950 	if (hxge_debug_init == 1) {
951 		MUTEX_DESTROY(&hxgedebuglock);
952 		hxge_debug_init = 0;
953 	}
954 
955 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_mutexes"));
956 }
957 
958 hxge_status_t
959 hxge_init(p_hxge_t hxgep)
960 {
961 	hxge_status_t status = HXGE_OK;
962 
963 	HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_init"));
964 
965 	if (hxgep->drv_state & STATE_HW_INITIALIZED) {
966 		return (status);
967 	}
968 
969 	/*
970 	 * Allocate system memory for the receive/transmit buffer blocks and
971 	 * receive/transmit descriptor rings.
972 	 */
973 	status = hxge_alloc_mem_pool(hxgep);
974 	if (status != HXGE_OK) {
975 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "alloc mem failed\n"));
976 		goto hxge_init_fail1;
977 	}
978 
979 	/*
980 	 * Initialize and enable TXDMA channels.
981 	 */
982 	status = hxge_init_txdma_channels(hxgep);
983 	if (status != HXGE_OK) {
984 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init txdma failed\n"));
985 		goto hxge_init_fail3;
986 	}
987 
988 	/*
989 	 * Initialize and enable RXDMA channels.
990 	 */
991 	status = hxge_init_rxdma_channels(hxgep);
992 	if (status != HXGE_OK) {
993 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init rxdma failed\n"));
994 		goto hxge_init_fail4;
995 	}
996 
997 	/*
998 	 * Initialize TCAM
999 	 */
1000 	status = hxge_classify_init(hxgep);
1001 	if (status != HXGE_OK) {
1002 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init classify failed\n"));
1003 		goto hxge_init_fail5;
1004 	}
1005 
1006 	/*
1007 	 * Initialize the VMAC block.
1008 	 */
1009 	status = hxge_vmac_init(hxgep);
1010 	if (status != HXGE_OK) {
1011 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init MAC failed\n"));
1012 		goto hxge_init_fail5;
1013 	}
1014 
1015 	/* Bringup: this may be unnecessary once PXE and FCODE are available */
1016 	status = hxge_pfc_set_default_mac_addr(hxgep);
1017 	if (status != HXGE_OK) {
1018 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1019 		    "Default Address Failure\n"));
1020 		goto hxge_init_fail5;
1021 	}
1022 
1023 	hxge_intrs_enable(hxgep);
1024 
1025 	/*
1026 	 * Enable hardware interrupts.
1027 	 */
1028 	hxge_intr_hw_enable(hxgep);
1029 	hxgep->drv_state |= STATE_HW_INITIALIZED;
1030 
1031 	goto hxge_init_exit;
1032 
1033 hxge_init_fail5:
1034 	hxge_uninit_rxdma_channels(hxgep);
1035 hxge_init_fail4:
1036 	hxge_uninit_txdma_channels(hxgep);
1037 hxge_init_fail3:
1038 	hxge_free_mem_pool(hxgep);
1039 hxge_init_fail1:
1040 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1041 	    "<== hxge_init status (failed) = 0x%08x", status));
1042 	return (status);
1043 
1044 hxge_init_exit:
1045 
1046 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_init status = 0x%08x",
1047 	    status));
1048 
1049 	return (status);
1050 }
1051 
1052 timeout_id_t
1053 hxge_start_timer(p_hxge_t hxgep, fptrv_t func, int msec)
1054 {
1055 	if ((hxgep->suspended == 0) || (hxgep->suspended == DDI_RESUME)) {
1056 		return (timeout(func, (caddr_t)hxgep,
1057 		    drv_usectohz(1000 * msec)));
1058 	}
1059 	return (NULL);
1060 }
1061 
1062 /*ARGSUSED*/
1063 void
1064 hxge_stop_timer(p_hxge_t hxgep, timeout_id_t timerid)
1065 {
1066 	if (timerid) {
1067 		(void) untimeout(timerid);
1068 	}
1069 }
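/*
 * Usage sketch for the timer pair above (callback name hypothetical);
 * hxge_start_timer() returns NULL while the instance is suspended:
 *
 *	hxgep->hxge_timerid = hxge_start_timer(hxgep, some_poll_func, 1000);
 *	...
 *	hxge_stop_timer(hxgep, hxgep->hxge_timerid);
 */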
1070 
1071 void
1072 hxge_uninit(p_hxge_t hxgep)
1073 {
1074 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_uninit"));
1075 
1076 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
1077 		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1078 		    "==> hxge_uninit: not initialized"));
1079 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
1080 		return;
1081 	}
1082 
1083 	/* Stop timer */
1084 	if (hxgep->hxge_timerid) {
1085 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
1086 		hxgep->hxge_timerid = 0;
1087 	}
1088 
1089 	(void) hxge_intr_hw_disable(hxgep);
1090 
1091 	/* Reset the receive VMAC side.  */
1092 	(void) hxge_rx_vmac_disable(hxgep);
1093 
1094 	/* Free classification resources */
1095 	(void) hxge_classify_uninit(hxgep);
1096 
1097 	/* Reset the transmit/receive DMA side.  */
1098 	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
1099 	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);
1100 
1101 	hxge_uninit_txdma_channels(hxgep);
1102 	hxge_uninit_rxdma_channels(hxgep);
1103 
1104 	/* Reset the transmit VMAC side.  */
1105 	(void) hxge_tx_vmac_disable(hxgep);
1106 
1107 	hxge_free_mem_pool(hxgep);
1108 
1109 	hxgep->drv_state &= ~STATE_HW_INITIALIZED;
1110 
1111 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
1112 }
1113 
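/*
 * hxge_get64/hxge_put64: debug helpers, apparently driven from the ioctl
 * path.  The mblk carries a register offset (and, for put64, a value);
 * get64 copies the register contents back over the offset in place.
 */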
1114 void
1115 hxge_get64(p_hxge_t hxgep, p_mblk_t mp)
1116 {
1117 #if defined(__i386)
1118 	size_t		reg;
1119 #else
1120 	uint64_t	reg;
1121 #endif
1122 	uint64_t	regdata;
1123 	int		i, retry;
1124 
1125 	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
1126 	regdata = 0;
1127 	retry = 1;
1128 
1129 	for (i = 0; i < retry; i++) {
1130 		HXGE_REG_RD64(hxgep->hpi_handle, reg, &regdata);
1131 	}
1132 	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
1133 }
1134 
1135 void
1136 hxge_put64(p_hxge_t hxgep, p_mblk_t mp)
1137 {
1138 #if defined(__i386)
1139 	size_t		reg;
1140 #else
1141 	uint64_t	reg;
1142 #endif
1143 	uint64_t	buf[2];
1144 
1145 	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
1146 #if defined(__i386)
1147 	reg = (size_t)buf[0];
1148 #else
1149 	reg = buf[0];
1150 #endif
1151 
1152 	HXGE_HPI_PIO_WRITE64(hxgep->hpi_handle, reg, buf[1]);
1153 }
1154 
1155 /*ARGSUSED*/
1156 /*VARARGS*/
1157 void
1158 hxge_debug_msg(p_hxge_t hxgep, uint64_t level, char *fmt, ...)
1159 {
1160 	char		msg_buffer[1048];
1161 	char		prefix_buffer[32];
1162 	int		instance;
1163 	uint64_t	debug_level;
1164 	int		cmn_level = CE_CONT;
1165 	va_list		ap;
1166 
1167 	debug_level = (hxgep == NULL) ? hxge_debug_level :
1168 	    hxgep->hxge_debug_level;
1169 
1170 	if ((level & debug_level) || (level == HXGE_NOTE) ||
1171 	    (level == HXGE_ERR_CTL)) {
1172 		/* do the msg processing */
1173 		if (hxge_debug_init == 0) {
1174 			MUTEX_INIT(&hxgedebuglock, NULL, MUTEX_DRIVER, NULL);
1175 			hxge_debug_init = 1;
1176 		}
1177 
1178 		MUTEX_ENTER(&hxgedebuglock);
1179 
1180 		if ((level & HXGE_NOTE)) {
1181 			cmn_level = CE_NOTE;
1182 		}
1183 
1184 		if (level & HXGE_ERR_CTL) {
1185 			cmn_level = CE_WARN;
1186 		}
1187 
1188 		va_start(ap, fmt);
1189 		(void) vsnprintf(msg_buffer, sizeof (msg_buffer), fmt, ap);
1190 		va_end(ap);
1191 
1192 		if (hxgep == NULL) {
1193 			instance = -1;
1194 			(void) sprintf(prefix_buffer, "%s :", "hxge");
1195 		} else {
1196 			instance = hxgep->instance;
1197 			(void) sprintf(prefix_buffer,
1198 			    "%s%d :", "hxge", instance);
1199 		}
1200 
1201 		MUTEX_EXIT(&hxgedebuglock);
1202 		cmn_err(cmn_level, "%s %s\n", prefix_buffer, msg_buffer);
1203 	}
1204 }
1205 
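/*
 * Format up to MAX_DUMP_SZ bytes of a packet as colon-separated hex for
 * debug logging.  The result is returned in a static buffer, so
 * concurrent callers will clobber one another; debug use only.
 */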
1206 char *
1207 hxge_dump_packet(char *addr, int size)
1208 {
1209 	uchar_t		*ap = (uchar_t *)addr;
1210 	int		i;
1211 	static char	etherbuf[1024];
1212 	char		*cp = etherbuf;
1213 	char		digits[] = "0123456789abcdef";
1214 
1215 	if (!size)
1216 		size = 60;
1217 
1218 	if (size > MAX_DUMP_SZ) {
1219 		/* Dump the leading bytes */
1220 		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1221 			if (*ap > 0x0f)
1222 				*cp++ = digits[*ap >> 4];
1223 			*cp++ = digits[*ap++ & 0xf];
1224 			*cp++ = ':';
1225 		}
1226 		for (i = 0; i < 20; i++)
1227 			*cp++ = '.';
1228 		/* Dump the last MAX_DUMP_SZ/2 bytes */
1229 		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
1230 		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1231 			if (*ap > 0x0f)
1232 				*cp++ = digits[*ap >> 4];
1233 			*cp++ = digits[*ap++ & 0xf];
1234 			*cp++ = ':';
1235 		}
1236 	} else {
1237 		for (i = 0; i < size; i++) {
1238 			if (*ap > 0x0f)
1239 				*cp++ = digits[*ap >> 4];
1240 			*cp++ = digits[*ap++ & 0xf];
1241 			*cp++ = ':';
1242 		}
1243 	}
1244 	*--cp = 0;
1245 	return (etherbuf);
1246 }
1247 
1248 static void
1249 hxge_suspend(p_hxge_t hxgep)
1250 {
1251 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_suspend"));
1252 
1253 	hxge_intrs_disable(hxgep);
1254 	hxge_destroy_dev(hxgep);
1255 
1256 	/* Stop the link status timer */
1257 	MUTEX_ENTER(&hxgep->timeout.lock);
1258 	if (hxgep->timeout.id)
1259 		(void) untimeout(hxgep->timeout.id);
1260 	MUTEX_EXIT(&hxgep->timeout.lock);
1261 
1262 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_suspend"));
1263 }
1264 
1265 static hxge_status_t
1266 hxge_resume(p_hxge_t hxgep)
1267 {
1268 	hxge_status_t status = HXGE_OK;
1269 
1270 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_resume"));
1271 	hxgep->suspended = DDI_RESUME;
1272 
1273 	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_START);
1274 	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_START);
1275 
1276 	(void) hxge_rx_vmac_enable(hxgep);
1277 	(void) hxge_tx_vmac_enable(hxgep);
1278 
1279 	hxge_intrs_enable(hxgep);
1280 
1281 	hxgep->suspended = 0;
1282 
1283 	/* Resume the link status timer */
1284 	MUTEX_ENTER(&hxgep->timeout.lock);
1285 	hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
1286 	    hxgep->timeout.ticks);
1287 	MUTEX_EXIT(&hxgep->timeout.lock);
1288 
1289 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1290 	    "<== hxge_resume status = 0x%x", status));
1291 
1292 	return (status);
1293 }
1294 
1295 hxge_status_t
1296 hxge_setup_dev(p_hxge_t hxgep)
1297 {
1298 	hxge_status_t status = HXGE_OK;
1299 
1300 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_dev"));
1301 
1302 	status = hxge_link_init(hxgep);
1303 	if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) {
1304 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1305 		    "Bad register acc handle"));
1306 		status = HXGE_ERROR;
1307 	}
1308 
1309 	if (status != HXGE_OK) {
1310 		HXGE_DEBUG_MSG((hxgep, MAC_CTL,
1311 		    " hxge_setup_dev status (link init 0x%08x)", status));
1312 		goto hxge_setup_dev_exit;
1313 	}
1314 
1315 hxge_setup_dev_exit:
1316 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1317 	    "<== hxge_setup_dev status = 0x%08x", status));
1318 
1319 	return (status);
1320 }
1321 
1322 static void
1323 hxge_destroy_dev(p_hxge_t hxgep)
1324 {
1325 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_dev"));
1326 
1327 	(void) hxge_hw_stop(hxgep);
1328 
1329 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_dev"));
1330 }
1331 
1332 static hxge_status_t
1333 hxge_setup_system_dma_pages(p_hxge_t hxgep)
1334 {
1335 	int			ddi_status = DDI_SUCCESS;
1336 	uint_t			count;
1337 	ddi_dma_cookie_t	cookie;
1338 	uint_t			iommu_pagesize;
1339 	hxge_status_t		status = HXGE_OK;
1340 
1341 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_system_dma_pages"));
1342 
1343 	hxgep->sys_page_sz = ddi_ptob(hxgep->dip, (ulong_t)1);
1344 	iommu_pagesize = dvma_pagesize(hxgep->dip);
1345 
1346 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1347 	    " hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1348 	    " default_block_size %d iommu_pagesize %d",
1349 	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1350 	    hxgep->rx_default_block_size, iommu_pagesize));
1351 
1352 	if (iommu_pagesize != 0) {
1353 		if (hxgep->sys_page_sz == iommu_pagesize) {
1354 			/* Hydra supports pages of up to 8K */
1355 			if (iommu_pagesize > 0x2000)
1356 				hxgep->sys_page_sz = 0x2000;
1357 		} else {
1358 			if (hxgep->sys_page_sz > iommu_pagesize)
1359 				hxgep->sys_page_sz = iommu_pagesize;
1360 		}
1361 	}
1362 
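	/*
	 * Mask off the in-page offset bits; e.g. a 0x2000-byte page size
	 * yields a mask of ~0x1fff.
	 */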
1363 	hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1364 
1365 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1366 	    "==> hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1367 	    "default_block_size %d page mask %d",
1368 	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1369 	    hxgep->rx_default_block_size, hxgep->sys_page_mask));
1370 
1371 	switch (hxgep->sys_page_sz) {
1372 	default:
1373 		hxgep->sys_page_sz = 0x1000;
1374 		hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1375 		hxgep->rx_default_block_size = 0x1000;
1376 		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1377 		break;
1378 	case 0x1000:
1379 		hxgep->rx_default_block_size = 0x1000;
1380 		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1381 		break;
1382 	case 0x2000:
1383 		hxgep->rx_default_block_size = 0x2000;
1384 		hxgep->rx_bksize_code = RBR_BKSIZE_8K;
1385 		break;
1386 	}
1387 
1388 	hxge_rx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1389 	hxge_tx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1390 
1391 	/*
1392 	 * Get the system DMA burst size.
1393 	 */
1394 	ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr,
1395 	    DDI_DMA_DONTWAIT, 0, &hxgep->dmasparehandle);
1396 	if (ddi_status != DDI_SUCCESS) {
1397 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1398 		    "ddi_dma_alloc_handle: failed status 0x%x", ddi_status));
1399 		goto hxge_get_soft_properties_exit;
1400 	}
1401 
1402 	ddi_status = ddi_dma_addr_bind_handle(hxgep->dmasparehandle, NULL,
1403 	    (caddr_t)hxgep->dmasparehandle, sizeof (hxgep->dmasparehandle),
1404 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
1405 	    &cookie, &count);
1406 	if (ddi_status != DDI_DMA_MAPPED) {
1407 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1408 		    "Binding spare handle to find system burstsize failed."));
1409 		ddi_status = DDI_FAILURE;
1410 		goto hxge_get_soft_properties_fail1;
1411 	}
1412 
1413 	hxgep->sys_burst_sz = ddi_dma_burstsizes(hxgep->dmasparehandle);
1414 	(void) ddi_dma_unbind_handle(hxgep->dmasparehandle);
1415 
1416 hxge_get_soft_properties_fail1:
1417 	ddi_dma_free_handle(&hxgep->dmasparehandle);
1418 
1419 hxge_get_soft_properties_exit:
1420 
1421 	if (ddi_status != DDI_SUCCESS)
1422 		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
1423 
1424 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1425 	    "<== hxge_setup_system_dma_pages status = 0x%08x", status));
1426 
1427 	return (status);
1428 }
1429 
1430 hxge_status_t
1431 hxge_alloc_mem_pool(p_hxge_t hxgep)
1432 {
1433 	hxge_status_t status = HXGE_OK;
1434 
1435 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_alloc_mem_pool"));
1436 
1437 	status = hxge_alloc_rx_mem_pool(hxgep);
1438 	if (status != HXGE_OK) {
1439 		return (HXGE_ERROR);
1440 	}
1441 
1442 	status = hxge_alloc_tx_mem_pool(hxgep);
1443 	if (status != HXGE_OK) {
1444 		hxge_free_rx_mem_pool(hxgep);
1445 		return (HXGE_ERROR);
1446 	}
1447 
1448 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_alloc_mem_pool"));
1449 	return (HXGE_OK);
1450 }
1451 
1452 static void
1453 hxge_free_mem_pool(p_hxge_t hxgep)
1454 {
1455 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_mem_pool"));
1456 
1457 	hxge_free_rx_mem_pool(hxgep);
1458 	hxge_free_tx_mem_pool(hxgep);
1459 
1460 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_mem_pool"));
1461 }
1462 
1463 static hxge_status_t
1464 hxge_alloc_rx_mem_pool(p_hxge_t hxgep)
1465 {
1466 	int			i, j;
1467 	uint32_t		ndmas, st_rdc;
1468 	p_hxge_dma_pt_cfg_t	p_all_cfgp;
1469 	p_hxge_hw_pt_cfg_t	p_cfgp;
1470 	p_hxge_dma_pool_t	dma_poolp;
1471 	p_hxge_dma_common_t	*dma_buf_p;
1472 	p_hxge_dma_pool_t	dma_rbr_cntl_poolp;
1473 	p_hxge_dma_common_t	*dma_rbr_cntl_p;
1474 	p_hxge_dma_pool_t	dma_rcr_cntl_poolp;
1475 	p_hxge_dma_common_t	*dma_rcr_cntl_p;
1476 	p_hxge_dma_pool_t	dma_mbox_cntl_poolp;
1477 	p_hxge_dma_common_t	*dma_mbox_cntl_p;
1478 	size_t			rx_buf_alloc_size;
1479 	size_t			rx_rbr_cntl_alloc_size;
1480 	size_t			rx_rcr_cntl_alloc_size;
1481 	size_t			rx_mbox_cntl_alloc_size;
1482 	uint32_t		*num_chunks;	/* per dma */
1483 	hxge_status_t		status = HXGE_OK;
1484 
1485 	uint32_t		hxge_port_rbr_size;
1486 	uint32_t		hxge_port_rbr_spare_size;
1487 	uint32_t		hxge_port_rcr_size;
1488 
1489 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_mem_pool"));
1490 
1491 	p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1492 	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1493 	st_rdc = p_cfgp->start_rdc;
1494 	ndmas = p_cfgp->max_rdcs;
1495 
1496 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1497 	    " hxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));
1498 
1499 	/*
1500 	 * Allocate memory for each receive DMA channel.
1501 	 */
1502 	dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1503 	    KM_SLEEP);
1504 	dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1505 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1506 
1507 	dma_rbr_cntl_poolp = (p_hxge_dma_pool_t)
1508 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1509 	dma_rbr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1510 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1511 	dma_rcr_cntl_poolp = (p_hxge_dma_pool_t)
1512 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1513 	dma_rcr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1514 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1515 	dma_mbox_cntl_poolp = (p_hxge_dma_pool_t)
1516 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1517 	dma_mbox_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1518 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1519 
1520 	num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
1521 	    KM_SLEEP);
1522 
1523 	/*
1524 	 * Assume that each DMA channel will be configured with the default block
1525 	 * size; RBR block counts are rounded up to a multiple of the batch count (16).
1526 	 */
1527 	hxge_port_rbr_size = p_all_cfgp->rbr_size;
1528 	hxge_port_rcr_size = p_all_cfgp->rcr_size;
1529 
1530 	if (!hxge_port_rbr_size) {
1531 		hxge_port_rbr_size = HXGE_RBR_RBB_DEFAULT;
1532 	}
1533 
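	/*
	 * Round the RBR size (and, below, the spare size) up to the next
	 * multiple of the posting batch count.
	 */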
1534 	if (hxge_port_rbr_size % HXGE_RXDMA_POST_BATCH) {
1535 		hxge_port_rbr_size = (HXGE_RXDMA_POST_BATCH *
1536 		    (hxge_port_rbr_size / HXGE_RXDMA_POST_BATCH + 1));
1537 	}
1538 
1539 	p_all_cfgp->rbr_size = hxge_port_rbr_size;
1540 	hxge_port_rbr_spare_size = hxge_rbr_spare_size;
1541 
1542 	if (hxge_port_rbr_spare_size % HXGE_RXDMA_POST_BATCH) {
1543 		hxge_port_rbr_spare_size = (HXGE_RXDMA_POST_BATCH *
1544 		    (hxge_port_rbr_spare_size / HXGE_RXDMA_POST_BATCH + 1));
1545 	}
1546 
1547 	rx_buf_alloc_size = (hxgep->rx_default_block_size *
1548 	    (hxge_port_rbr_size + hxge_port_rbr_spare_size));
1549 
1550 	/*
1551 	 * Addresses of receive block ring, receive completion ring and the
1552 	 * mailbox must all be cache-line aligned (64 bytes).
1553 	 */
1554 	rx_rbr_cntl_alloc_size = hxge_port_rbr_size + hxge_port_rbr_spare_size;
1555 	rx_rbr_cntl_alloc_size *= sizeof (rx_desc_t);
1556 	rx_rcr_cntl_alloc_size = sizeof (rcr_entry_t) * hxge_port_rcr_size;
1557 	rx_mbox_cntl_alloc_size = sizeof (rxdma_mailbox_t);
1558 
1559 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_alloc_rx_mem_pool: "
1560 	    "hxge_port_rbr_size = %d hxge_port_rbr_spare_size = %d "
1561 	    "hxge_port_rcr_size = %d rx_cntl_alloc_size = %d",
1562 	    hxge_port_rbr_size, hxge_port_rbr_spare_size,
1563 	    hxge_port_rcr_size, rx_cntl_alloc_size));
1564 
1565 	hxgep->hxge_port_rbr_size = hxge_port_rbr_size;
1566 	hxgep->hxge_port_rcr_size = hxge_port_rcr_size;
1567 
1568 	/*
1569 	 * Allocate memory for receive buffers and descriptor rings. Replace
1570 	 * allocation functions with interface functions provided by the
1571 	 * partition manager when it is available.
1572 	 */
1573 	/*
1574 	 * Allocate memory for the receive buffer blocks.
1575 	 */
1576 	for (i = 0; i < ndmas; i++) {
1577 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1578 		    " hxge_alloc_rx_mem_pool to alloc mem: "
1579 		    " dma %d dma_buf_p %llx &dma_buf_p %llx",
1580 		    i, dma_buf_p[i], &dma_buf_p[i]));
1581 
1582 		num_chunks[i] = 0;
1583 
1584 		status = hxge_alloc_rx_buf_dma(hxgep, st_rdc, &dma_buf_p[i],
1585 		    rx_buf_alloc_size, hxgep->rx_default_block_size,
1586 		    &num_chunks[i]);
1587 		if (status != HXGE_OK) {
1588 			break;
1589 		}
1590 
1591 		st_rdc++;
1592 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1593 		    " hxge_alloc_rx_mem_pool DONE  alloc mem: "
1594 		    "dma %d dma_buf_p %llx &dma_buf_p %llx", i,
1595 		    dma_buf_p[i], &dma_buf_p[i]));
1596 	}
1597 
1598 	if (i < ndmas) {
1599 		goto hxge_alloc_rx_mem_fail1;
1600 	}
1601 
1602 	/*
1603 	 * Allocate memory for descriptor rings and mailbox.
1604 	 */
1605 	st_rdc = p_cfgp->start_rdc;
1606 	for (j = 0; j < ndmas; j++) {
1607 		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1608 		    &dma_rbr_cntl_p[j], &hxge_rx_rbr_desc_dma_attr,
1609 		    rx_rbr_cntl_alloc_size)) != HXGE_OK) {
1610 			break;
1611 		}
1612 
1613 		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1614 		    &dma_rcr_cntl_p[j], &hxge_rx_rcr_desc_dma_attr,
1615 		    rx_rcr_cntl_alloc_size)) != HXGE_OK) {
1616 			break;
1617 		}
1618 
1619 		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1620 		    &dma_mbox_cntl_p[j], &hxge_rx_mbox_dma_attr,
1621 		    rx_mbox_cntl_alloc_size)) != HXGE_OK) {
1622 			break;
1623 		}
1624 		st_rdc++;
1625 	}
1626 
1627 	if (j < ndmas) {
1628 		goto hxge_alloc_rx_mem_fail2;
1629 	}
1630 
1631 	dma_poolp->ndmas = ndmas;
1632 	dma_poolp->num_chunks = num_chunks;
1633 	dma_poolp->buf_allocated = B_TRUE;
1634 	hxgep->rx_buf_pool_p = dma_poolp;
1635 	dma_poolp->dma_buf_pool_p = dma_buf_p;
1636 
1637 	dma_rbr_cntl_poolp->ndmas = ndmas;
1638 	dma_rbr_cntl_poolp->buf_allocated = B_TRUE;
1639 	hxgep->rx_rbr_cntl_pool_p = dma_rbr_cntl_poolp;
1640 	dma_rbr_cntl_poolp->dma_buf_pool_p = dma_rbr_cntl_p;
1641 
1642 	dma_rcr_cntl_poolp->ndmas = ndmas;
1643 	dma_rcr_cntl_poolp->buf_allocated = B_TRUE;
1644 	hxgep->rx_rcr_cntl_pool_p = dma_rcr_cntl_poolp;
1645 	dma_rcr_cntl_poolp->dma_buf_pool_p = dma_rcr_cntl_p;
1646 
1647 	dma_mbox_cntl_poolp->ndmas = ndmas;
1648 	dma_mbox_cntl_poolp->buf_allocated = B_TRUE;
1649 	hxgep->rx_mbox_cntl_pool_p = dma_mbox_cntl_poolp;
1650 	dma_mbox_cntl_poolp->dma_buf_pool_p = dma_mbox_cntl_p;
1651 
1652 	goto hxge_alloc_rx_mem_pool_exit;
1653 
1654 hxge_alloc_rx_mem_fail2:
1655 	/* Free control buffers */
1656 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1657 	    "==> hxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
1658 	for (; j >= 0; j--) {
1659 		hxge_free_rx_cntl_dma(hxgep,
1660 		    (p_hxge_dma_common_t)dma_rbr_cntl_p[j]);
1661 		hxge_free_rx_cntl_dma(hxgep,
1662 		    (p_hxge_dma_common_t)dma_rcr_cntl_p[j]);
1663 		hxge_free_rx_cntl_dma(hxgep,
1664 		    (p_hxge_dma_common_t)dma_mbox_cntl_p[j]);
1665 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1666 		    "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1667 	}
1668 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1669 	    "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1670 
1671 hxge_alloc_rx_mem_fail1:
1672 	/* Free data buffers */
1673 	i--;
1674 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1675 	    "==> hxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
1676 	for (; i >= 0; i--) {
1677 		hxge_free_rx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
1678 		    num_chunks[i]);
1679 	}
1680 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1681 	    "==> hxge_alloc_rx_mem_pool: data bufs freed (%d)", i));
1682 
1683 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1684 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1685 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1686 	KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
1687 	KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1688 	KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
1689 	KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1690 	KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
1691 	KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1692 
1693 hxge_alloc_rx_mem_pool_exit:
1694 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1695 	    "<== hxge_alloc_rx_mem_pool:status 0x%08x", status));
1696 
1697 	return (status);
1698 }
1699 
1700 static void
1701 hxge_free_rx_mem_pool(p_hxge_t hxgep)
1702 {
1703 	uint32_t		i, ndmas;
1704 	p_hxge_dma_pool_t	dma_poolp;
1705 	p_hxge_dma_common_t	*dma_buf_p;
1706 	p_hxge_dma_pool_t	dma_rbr_cntl_poolp;
1707 	p_hxge_dma_common_t	*dma_rbr_cntl_p;
1708 	p_hxge_dma_pool_t	dma_rcr_cntl_poolp;
1709 	p_hxge_dma_common_t	*dma_rcr_cntl_p;
1710 	p_hxge_dma_pool_t	dma_mbox_cntl_poolp;
1711 	p_hxge_dma_common_t	*dma_mbox_cntl_p;
1712 	uint32_t		*num_chunks;
1713 
1714 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_free_rx_mem_pool"));
1715 
1716 	dma_poolp = hxgep->rx_buf_pool_p;
1717 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
1718 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool "
1719 		    "(null rx buf pool or buf not allocated"));
1720 		return;
1721 	}
1722 
1723 	dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p;
1724 	if (dma_rbr_cntl_poolp == NULL ||
1725 	    (!dma_rbr_cntl_poolp->buf_allocated)) {
1726 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1727 		    "<== hxge_free_rx_mem_pool "
1728 		    "(null rbr cntl buf pool or rbr cntl buf not allocated"));
1729 		return;
1730 	}
1731 
1732 	dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p;
1733 	if (dma_rcr_cntl_poolp == NULL ||
1734 	    (!dma_rcr_cntl_poolp->buf_allocated)) {
1735 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1736 		    "<== hxge_free_rx_mem_pool "
1737 		    "(null rcr cntl buf pool or rcr cntl buf not allocated"));
1738 		return;
1739 	}
1740 
1741 	dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p;
1742 	if (dma_mbox_cntl_poolp == NULL ||
1743 	    (!dma_mbox_cntl_poolp->buf_allocated)) {
1744 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1745 		    "<== hxge_free_rx_mem_pool "
1746 		    "(null mbox cntl buf pool or mbox cntl buf not allocated"));
1747 		return;
1748 	}
1749 
1750 	dma_buf_p = dma_poolp->dma_buf_pool_p;
1751 	num_chunks = dma_poolp->num_chunks;
1752 
1753 	dma_rbr_cntl_p = dma_rbr_cntl_poolp->dma_buf_pool_p;
1754 	dma_rcr_cntl_p = dma_rcr_cntl_poolp->dma_buf_pool_p;
1755 	dma_mbox_cntl_p = dma_mbox_cntl_poolp->dma_buf_pool_p;
1756 	ndmas = dma_rbr_cntl_poolp->ndmas;
1757 
1758 	for (i = 0; i < ndmas; i++) {
1759 		hxge_free_rx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
1760 	}
1761 
1762 	for (i = 0; i < ndmas; i++) {
1763 		hxge_free_rx_cntl_dma(hxgep, dma_rbr_cntl_p[i]);
1764 		hxge_free_rx_cntl_dma(hxgep, dma_rcr_cntl_p[i]);
1765 		hxge_free_rx_cntl_dma(hxgep, dma_mbox_cntl_p[i]);
1766 	}
1767 
1768 	for (i = 0; i < ndmas; i++) {
1769 		KMEM_FREE(dma_buf_p[i],
1770 		    sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1771 		KMEM_FREE(dma_rbr_cntl_p[i], sizeof (hxge_dma_common_t));
1772 		KMEM_FREE(dma_rcr_cntl_p[i], sizeof (hxge_dma_common_t));
1773 		KMEM_FREE(dma_mbox_cntl_p[i], sizeof (hxge_dma_common_t));
1774 	}
1775 
1776 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1777 	KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1778 	KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
1779 	KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1780 	KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
1781 	KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1782 	KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
1783 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1784 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1785 
1786 	hxgep->rx_buf_pool_p = NULL;
1787 	hxgep->rx_rbr_cntl_pool_p = NULL;
1788 	hxgep->rx_rcr_cntl_pool_p = NULL;
1789 	hxgep->rx_mbox_cntl_pool_p = NULL;
1790 
1791 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool"));
1792 }
1793 
1794 static hxge_status_t
1795 hxge_alloc_rx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
1796     p_hxge_dma_common_t *dmap,
1797     size_t alloc_size, size_t block_size, uint32_t *num_chunks)
1798 {
1799 	p_hxge_dma_common_t	rx_dmap;
1800 	hxge_status_t		status = HXGE_OK;
1801 	size_t			total_alloc_size;
1802 	size_t			allocated = 0;
1803 	int			i, size_index, array_size;
1804 
1805 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_buf_dma"));
1806 
1807 	rx_dmap = (p_hxge_dma_common_t)
1808 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
1809 
1810 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1811 	    " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
1812 	    dma_channel, alloc_size, block_size, dmap));
1813 
1814 	total_alloc_size = alloc_size;
1815 
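	/*
	 * Pick the smallest predefined allocation size that can satisfy
	 * the request; fall back to the largest entry if none is big
	 * enough.
	 */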
1816 	i = 0;
1817 	size_index = 0;
1818 	array_size = sizeof (alloc_sizes) / sizeof (size_t);
	while ((size_index < array_size) &&
	    (alloc_sizes[size_index] < alloc_size))
1821 		size_index++;
1822 	if (size_index >= array_size) {
1823 		size_index = array_size - 1;
1824 	}
1825 
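	/*
	 * Allocate the buffer pool in up to HXGE_DMA_BLOCK chunks,
	 * stepping down to the next smaller predefined size whenever
	 * an allocation fails.
	 */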
1826 	while ((allocated < total_alloc_size) &&
1827 	    (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
1828 		rx_dmap[i].dma_chunk_index = i;
1829 		rx_dmap[i].block_size = block_size;
1830 		rx_dmap[i].alength = alloc_sizes[size_index];
1831 		rx_dmap[i].orig_alength = rx_dmap[i].alength;
1832 		rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
1833 		rx_dmap[i].dma_channel = dma_channel;
1834 		rx_dmap[i].contig_alloc_type = B_FALSE;
1835 
1836 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1837 		    "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
1838 		    "i %d nblocks %d alength %d",
1839 		    dma_channel, i, &rx_dmap[i], block_size,
1840 		    i, rx_dmap[i].nblocks, rx_dmap[i].alength));
1841 		status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1842 		    &hxge_rx_dma_attr, rx_dmap[i].alength,
1843 		    &hxge_dev_buf_dma_acc_attr,
1844 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1845 		    (p_hxge_dma_common_t)(&rx_dmap[i]));
1846 		if (status != HXGE_OK) {
1847 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1848 			    " hxge_alloc_rx_buf_dma: Alloc Failed: "
1849 			    " for size: %d", alloc_sizes[size_index]));
1850 			size_index--;
1851 		} else {
1852 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1853 			    " alloc_rx_buf_dma allocated rdc %d "
1854 			    "chunk %d size %x dvma %x bufp %llx ",
1855 			    dma_channel, i, rx_dmap[i].alength,
1856 			    rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
1857 			i++;
1858 			allocated += alloc_sizes[size_index];
1859 		}
1860 	}
1861 
1862 	if (allocated < total_alloc_size) {
1863 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1864 		    " hxge_alloc_rx_buf_dma failed due to"
1865 		    " allocated(%d) < required(%d)",
1866 		    allocated, total_alloc_size));
1867 		goto hxge_alloc_rx_mem_fail1;
1868 	}
1869 
1870 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1871 	    " alloc_rx_buf_dma rdc %d allocated %d chunks", dma_channel, i));
1872 
1873 	*num_chunks = i;
1874 	*dmap = rx_dmap;
1875 
1876 	goto hxge_alloc_rx_mem_exit;
1877 
1878 hxge_alloc_rx_mem_fail1:
1879 	KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1880 
1881 hxge_alloc_rx_mem_exit:
1882 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1883 	    "<== hxge_alloc_rx_buf_dma status 0x%08x", status));
1884 
1885 	return (status);
1886 }
1887 
1888 /*ARGSUSED*/
1889 static void
1890 hxge_free_rx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
1891     uint32_t num_chunks)
1892 {
1893 	int i;
1894 
1895 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1896 	    "==> hxge_free_rx_buf_dma: # of chunks %d", num_chunks));
1897 
1898 	for (i = 0; i < num_chunks; i++) {
1899 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1900 		    "==> hxge_free_rx_buf_dma: chunk %d dmap 0x%llx", i, dmap));
1901 		hxge_dma_mem_free(dmap++);
1902 	}
1903 
1904 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_buf_dma"));
1905 }
1906 
1907 /*ARGSUSED*/
1908 static hxge_status_t
1909 hxge_alloc_rx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
1910     p_hxge_dma_common_t *dmap, struct ddi_dma_attr *attr, size_t size)
1911 {
1912 	p_hxge_dma_common_t	rx_dmap;
1913 	hxge_status_t		status = HXGE_OK;
1914 
1915 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_cntl_dma"));
1916 
1917 	rx_dmap = (p_hxge_dma_common_t)
1918 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t), KM_SLEEP);
1919 
1920 	rx_dmap->contig_alloc_type = B_FALSE;
1921 
1922 	status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1923 	    attr, size, &hxge_dev_desc_dma_acc_attr,
1924 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, rx_dmap);
1925 	if (status != HXGE_OK) {
1926 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1927 		    " hxge_alloc_rx_cntl_dma: Alloc Failed: "
1928 		    " for size: %d", size));
1929 		goto hxge_alloc_rx_cntl_dma_fail1;
1930 	}
1931 
1932 	*dmap = rx_dmap;
1933 
1934 	goto hxge_alloc_rx_cntl_dma_exit;
1935 
1936 hxge_alloc_rx_cntl_dma_fail1:
1937 	KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t));
1938 
1939 hxge_alloc_rx_cntl_dma_exit:
1940 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1941 	    "<== hxge_alloc_rx_cntl_dma status 0x%08x", status));
1942 
1943 	return (status);
1944 }
1945 
1946 /*ARGSUSED*/
1947 static void
1948 hxge_free_rx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
1949 {
1950 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_rx_cntl_dma"));
1951 
1952 	hxge_dma_mem_free(dmap);
1953 
1954 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_rx_cntl_dma"));
1955 }
1956 
1957 static hxge_status_t
1958 hxge_alloc_tx_mem_pool(p_hxge_t hxgep)
1959 {
1960 	hxge_status_t		status = HXGE_OK;
1961 	int			i, j;
1962 	uint32_t		ndmas, st_tdc;
1963 	p_hxge_dma_pt_cfg_t	p_all_cfgp;
1964 	p_hxge_hw_pt_cfg_t	p_cfgp;
1965 	p_hxge_dma_pool_t	dma_poolp;
1966 	p_hxge_dma_common_t	*dma_buf_p;
1967 	p_hxge_dma_pool_t	dma_cntl_poolp;
1968 	p_hxge_dma_common_t	*dma_cntl_p;
1969 	size_t			tx_buf_alloc_size;
1970 	size_t			tx_cntl_alloc_size;
1971 	uint32_t		*num_chunks;	/* per dma */
1972 
1973 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool"));
1974 
1975 	p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1976 	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1977 	st_tdc = p_cfgp->start_tdc;
1978 	ndmas = p_cfgp->max_tdcs;
1979 
1980 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool: "
1981 	    "p_cfgp 0x%016llx start_tdc %d ndmas %d hxgep->max_tdcs %d",
1982 	    p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, hxgep->max_tdcs));
1983 	/*
1984 	 * Allocate memory for each transmit DMA channel.
1985 	 */
1986 	dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1987 	    KM_SLEEP);
1988 	dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1989 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1990 
1991 	dma_cntl_poolp = (p_hxge_dma_pool_t)
1992 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1993 	dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1994 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1995 
1996 	hxgep->hxge_port_tx_ring_size = hxge_tx_ring_size;
1997 
1998 	/*
	 * Assume that each DMA channel will be configured with the default
	 * transmit buffer size for copying transmit data.  (Packets with a
	 * payload over this limit are not copied.)
2002 	 */
2003 	tx_buf_alloc_size = (hxge_bcopy_thresh * hxge_tx_ring_size);
2004 
2005 	/*
2006 	 * Addresses of transmit descriptor ring and the mailbox must be all
2007 	 * cache-aligned (64 bytes).
2008 	 */
2009 	tx_cntl_alloc_size = hxge_tx_ring_size;
2010 	tx_cntl_alloc_size *= (sizeof (tx_desc_t));
2011 	tx_cntl_alloc_size += sizeof (txdma_mailbox_t);
2012 
2013 	num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
2014 	    KM_SLEEP);
2015 
2016 	/*
2017 	 * Allocate memory for transmit buffers and descriptor rings. Replace
2018 	 * allocation functions with interface functions provided by the
2019 	 * partition manager when it is available.
2020 	 *
2021 	 * Allocate memory for the transmit buffer pool.
2022 	 */
2023 	for (i = 0; i < ndmas; i++) {
2024 		num_chunks[i] = 0;
2025 		status = hxge_alloc_tx_buf_dma(hxgep, st_tdc, &dma_buf_p[i],
2026 		    tx_buf_alloc_size, hxge_bcopy_thresh, &num_chunks[i]);
2027 		if (status != HXGE_OK) {
2028 			break;
2029 		}
2030 		st_tdc++;
2031 	}
2032 
2033 	if (i < ndmas) {
2034 		goto hxge_alloc_tx_mem_pool_fail1;
2035 	}
2036 
2037 	st_tdc = p_cfgp->start_tdc;
2038 
2039 	/*
2040 	 * Allocate memory for descriptor rings and mailbox.
2041 	 */
2042 	for (j = 0; j < ndmas; j++) {
2043 		status = hxge_alloc_tx_cntl_dma(hxgep, st_tdc, &dma_cntl_p[j],
2044 		    tx_cntl_alloc_size);
2045 		if (status != HXGE_OK) {
2046 			break;
2047 		}
2048 		st_tdc++;
2049 	}
2050 
2051 	if (j < ndmas) {
2052 		goto hxge_alloc_tx_mem_pool_fail2;
2053 	}
2054 
2055 	dma_poolp->ndmas = ndmas;
2056 	dma_poolp->num_chunks = num_chunks;
2057 	dma_poolp->buf_allocated = B_TRUE;
2058 	dma_poolp->dma_buf_pool_p = dma_buf_p;
2059 	hxgep->tx_buf_pool_p = dma_poolp;
2060 
2061 	dma_cntl_poolp->ndmas = ndmas;
2062 	dma_cntl_poolp->buf_allocated = B_TRUE;
2063 	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2064 	hxgep->tx_cntl_pool_p = dma_cntl_poolp;
2065 
2066 	HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2067 	    "==> hxge_alloc_tx_mem_pool: start_tdc %d "
2068 	    "ndmas %d poolp->ndmas %d", st_tdc, ndmas, dma_poolp->ndmas));
2069 
2070 	goto hxge_alloc_tx_mem_pool_exit;
2071 
2072 hxge_alloc_tx_mem_pool_fail2:
2073 	/* Free control buffers */
2074 	j--;
2075 	for (; j >= 0; j--) {
2076 		hxge_free_tx_cntl_dma(hxgep,
2077 		    (p_hxge_dma_common_t)dma_cntl_p[j]);
2078 	}
2079 
2080 hxge_alloc_tx_mem_pool_fail1:
2081 	/* Free data buffers */
2082 	i--;
2083 	for (; i >= 0; i--) {
2084 		hxge_free_tx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
2085 		    num_chunks[i]);
2086 	}
2087 
2088 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2089 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2090 	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2091 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2092 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2093 
2094 hxge_alloc_tx_mem_pool_exit:
2095 	HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2096 	    "<== hxge_alloc_tx_mem_pool:status 0x%08x", status));
2097 
2098 	return (status);
2099 }
2100 
2101 static hxge_status_t
2102 hxge_alloc_tx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
2103     p_hxge_dma_common_t *dmap, size_t alloc_size,
2104     size_t block_size, uint32_t *num_chunks)
2105 {
2106 	p_hxge_dma_common_t	tx_dmap;
2107 	hxge_status_t		status = HXGE_OK;
2108 	size_t			total_alloc_size;
2109 	size_t			allocated = 0;
2110 	int			i, size_index, array_size;
2111 
2112 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_buf_dma"));
2113 
2114 	tx_dmap = (p_hxge_dma_common_t)
2115 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
2116 
2117 	total_alloc_size = alloc_size;
2118 	i = 0;
2119 	size_index = 0;
2120 	array_size = sizeof (alloc_sizes) / sizeof (size_t);
	while ((size_index < array_size) &&
	    (alloc_sizes[size_index] < alloc_size))
2123 		size_index++;
2124 	if (size_index >= array_size) {
2125 		size_index = array_size - 1;
2126 	}
2127 
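	/*
	 * As on the receive side, allocate in up to HXGE_DMA_BLOCK
	 * chunks, stepping down to smaller predefined sizes on failure.
	 */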
2128 	while ((allocated < total_alloc_size) &&
2129 	    (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
2130 		tx_dmap[i].dma_chunk_index = i;
2131 		tx_dmap[i].block_size = block_size;
2132 		tx_dmap[i].alength = alloc_sizes[size_index];
2133 		tx_dmap[i].orig_alength = tx_dmap[i].alength;
2134 		tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2135 		tx_dmap[i].dma_channel = dma_channel;
2136 		tx_dmap[i].contig_alloc_type = B_FALSE;
2137 
2138 		status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2139 		    &hxge_tx_dma_attr, tx_dmap[i].alength,
2140 		    &hxge_dev_buf_dma_acc_attr,
2141 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
2142 		    (p_hxge_dma_common_t)(&tx_dmap[i]));
2143 		if (status != HXGE_OK) {
2144 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2145 			    " hxge_alloc_tx_buf_dma: Alloc Failed: "
2146 			    " for size: %d", alloc_sizes[size_index]));
2147 			size_index--;
2148 		} else {
2149 			i++;
2150 			allocated += alloc_sizes[size_index];
2151 		}
2152 	}
2153 
2154 	if (allocated < total_alloc_size) {
2155 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2156 		    " hxge_alloc_tx_buf_dma: failed due to"
2157 		    " allocated(%d) < required(%d)",
2158 		    allocated, total_alloc_size));
2159 		goto hxge_alloc_tx_mem_fail1;
2160 	}
2161 
2162 	*num_chunks = i;
2163 	*dmap = tx_dmap;
2164 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2165 	    "==> hxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
2166 	    *dmap, i));
2167 	goto hxge_alloc_tx_mem_exit;
2168 
2169 hxge_alloc_tx_mem_fail1:
2170 	KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2171 
2172 hxge_alloc_tx_mem_exit:
2173 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2174 	    "<== hxge_alloc_tx_buf_dma status 0x%08x", status));
2175 
2176 	return (status);
2177 }
2178 
2179 /*ARGSUSED*/
2180 static void
2181 hxge_free_tx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
2182     uint32_t num_chunks)
2183 {
2184 	int i;
2185 
2186 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_tx_buf_dma"));
2187 
2188 	for (i = 0; i < num_chunks; i++) {
2189 		hxge_dma_mem_free(dmap++);
2190 	}
2191 
2192 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_tx_buf_dma"));
2193 }
2194 
2195 /*ARGSUSED*/
2196 static hxge_status_t
2197 hxge_alloc_tx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
2198     p_hxge_dma_common_t *dmap, size_t size)
2199 {
2200 	p_hxge_dma_common_t	tx_dmap;
2201 	hxge_status_t		status = HXGE_OK;
2202 
2203 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_cntl_dma"));
2204 
2205 	tx_dmap = (p_hxge_dma_common_t)KMEM_ZALLOC(sizeof (hxge_dma_common_t),
2206 	    KM_SLEEP);
2207 
2208 	tx_dmap->contig_alloc_type = B_FALSE;
2209 
2210 	status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2211 	    &hxge_tx_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr,
2212 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, tx_dmap);
2213 	if (status != HXGE_OK) {
2214 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2215 		    " hxge_alloc_tx_cntl_dma: Alloc Failed: "
2216 		    " for size: %d", size));
2217 		goto hxge_alloc_tx_cntl_dma_fail1;
2218 	}
2219 
2220 	*dmap = tx_dmap;
2221 
2222 	goto hxge_alloc_tx_cntl_dma_exit;
2223 
2224 hxge_alloc_tx_cntl_dma_fail1:
2225 	KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t));
2226 
2227 hxge_alloc_tx_cntl_dma_exit:
2228 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2229 	    "<== hxge_alloc_tx_cntl_dma status 0x%08x", status));
2230 
2231 	return (status);
2232 }
2233 
2234 /*ARGSUSED*/
2235 static void
2236 hxge_free_tx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
2237 {
2238 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_tx_cntl_dma"));
2239 
2240 	hxge_dma_mem_free(dmap);
2241 
2242 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_tx_cntl_dma"));
2243 }
2244 
2245 static void
2246 hxge_free_tx_mem_pool(p_hxge_t hxgep)
2247 {
2248 	uint32_t		i, ndmas;
2249 	p_hxge_dma_pool_t	dma_poolp;
2250 	p_hxge_dma_common_t	*dma_buf_p;
2251 	p_hxge_dma_pool_t	dma_cntl_poolp;
2252 	p_hxge_dma_common_t	*dma_cntl_p;
2253 	uint32_t		*num_chunks;
2254 
2255 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_free_tx_mem_pool"));
2256 
2257 	dma_poolp = hxgep->tx_buf_pool_p;
2258 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
2259 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2260 		    "<== hxge_free_tx_mem_pool "
		    "(null tx buf pool or buf not allocated)"));
2262 		return;
2263 	}
2264 
2265 	dma_cntl_poolp = hxgep->tx_cntl_pool_p;
2266 	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
2267 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2268 		    "<== hxge_free_tx_mem_pool "
		    "(null tx cntl buf pool or cntl buf not allocated)"));
2270 		return;
2271 	}
2272 
2273 	dma_buf_p = dma_poolp->dma_buf_pool_p;
2274 	num_chunks = dma_poolp->num_chunks;
2275 
2276 	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
2277 	ndmas = dma_cntl_poolp->ndmas;
2278 
2279 	for (i = 0; i < ndmas; i++) {
2280 		hxge_free_tx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
2281 	}
2282 
2283 	for (i = 0; i < ndmas; i++) {
2284 		hxge_free_tx_cntl_dma(hxgep, dma_cntl_p[i]);
2285 	}
2286 
2287 	for (i = 0; i < ndmas; i++) {
2288 		KMEM_FREE(dma_buf_p[i],
2289 		    sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2290 		KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t));
2291 	}
2292 
2293 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2294 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2295 	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2296 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2297 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2298 
2299 	hxgep->tx_buf_pool_p = NULL;
2300 	hxgep->tx_cntl_pool_p = NULL;
2301 
2302 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_free_tx_mem_pool"));
2303 }
2304 
2305 /*ARGSUSED*/
2306 static hxge_status_t
2307 hxge_dma_mem_alloc(p_hxge_t hxgep, dma_method_t method,
2308     struct ddi_dma_attr *dma_attrp,
2309     size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
2310     p_hxge_dma_common_t dma_p)
2311 {
2312 	caddr_t		kaddrp;
2313 	int		ddi_status = DDI_SUCCESS;
2314 
2315 	dma_p->dma_handle = NULL;
2316 	dma_p->acc_handle = NULL;
2317 	dma_p->kaddrp = NULL;
2318 
2319 	ddi_status = ddi_dma_alloc_handle(hxgep->dip, dma_attrp,
2320 	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
2321 	if (ddi_status != DDI_SUCCESS) {
2322 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2323 		    "hxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
2324 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2325 	}
2326 
2327 	ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, acc_attr_p,
2328 	    xfer_flags, DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
2329 	    &dma_p->acc_handle);
2330 	if (ddi_status != DDI_SUCCESS) {
2331 		/* The caller will decide whether it is fatal */
2332 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2333 		    "hxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
2334 		ddi_dma_free_handle(&dma_p->dma_handle);
2335 		dma_p->dma_handle = NULL;
2336 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2337 	}
2338 
2339 	if (dma_p->alength < length) {
2340 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2341 		    "hxge_dma_mem_alloc:ddi_dma_mem_alloc < length."));
2342 		ddi_dma_mem_free(&dma_p->acc_handle);
2343 		ddi_dma_free_handle(&dma_p->dma_handle);
2344 		dma_p->acc_handle = NULL;
2345 		dma_p->dma_handle = NULL;
2346 		return (HXGE_ERROR);
2347 	}
2348 
2349 	ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2350 	    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2351 	    &dma_p->dma_cookie, &dma_p->ncookies);
2352 	if (ddi_status != DDI_DMA_MAPPED) {
2353 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_dma_mem_alloc:ddi_dma_addr_bind failed "
		    "(status 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2356 		if (dma_p->acc_handle) {
2357 			ddi_dma_mem_free(&dma_p->acc_handle);
2358 			dma_p->acc_handle = NULL;
2359 		}
2360 		ddi_dma_free_handle(&dma_p->dma_handle);
2361 		dma_p->dma_handle = NULL;
2362 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2363 	}
2364 
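	/*
	 * Only a single DMA cookie (one physically contiguous segment)
	 * is supported; otherwise undo the binding and fail.
	 */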
2365 	if (dma_p->ncookies != 1) {
2366 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
		    "hxge_dma_mem_alloc:ddi_dma_addr_bind > 1 cookie "
		    "(status 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2369 		if (dma_p->acc_handle) {
2370 			ddi_dma_mem_free(&dma_p->acc_handle);
2371 			dma_p->acc_handle = NULL;
2372 		}
2373 		(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2374 		ddi_dma_free_handle(&dma_p->dma_handle);
2375 		dma_p->dma_handle = NULL;
2376 		return (HXGE_ERROR);
2377 	}
2378 
2379 	dma_p->kaddrp = kaddrp;
2380 #if defined(__i386)
2381 	dma_p->ioaddr_pp =
2382 	    (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
2383 #else
2384 	dma_p->ioaddr_pp = (unsigned char *) dma_p->dma_cookie.dmac_laddress;
2385 #endif
2386 
2387 	HPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
2388 
2389 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_dma_mem_alloc: "
2390 	    "dma buffer allocated: dma_p $%p "
	    "return dmac_laddress from cookie $%p dmac_size %d "
2392 	    "dma_p->ioaddr_p $%p "
2393 	    "dma_p->orig_ioaddr_p $%p "
2394 	    "orig_vatopa $%p "
2395 	    "alength %d (0x%x) "
2396 	    "kaddrp $%p "
2397 	    "length %d (0x%x)",
2398 	    dma_p,
2399 	    dma_p->dma_cookie.dmac_laddress,
2400 	    dma_p->dma_cookie.dmac_size,
2401 	    dma_p->ioaddr_pp,
2402 	    dma_p->orig_ioaddr_pp,
2403 	    dma_p->orig_vatopa,
2404 	    dma_p->alength, dma_p->alength,
2405 	    kaddrp,
2406 	    length, length));
2407 
2408 	return (HXGE_OK);
2409 }
2410 
2411 static void
2412 hxge_dma_mem_free(p_hxge_dma_common_t dma_p)
2413 {
2414 	if (dma_p == NULL)
2415 		return;
2416 
2417 	if (dma_p->dma_handle != NULL) {
2418 		if (dma_p->ncookies) {
2419 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2420 			dma_p->ncookies = 0;
2421 		}
2422 		ddi_dma_free_handle(&dma_p->dma_handle);
2423 		dma_p->dma_handle = NULL;
2424 	}
2425 
2426 	if (dma_p->acc_handle != NULL) {
2427 		ddi_dma_mem_free(&dma_p->acc_handle);
2428 		dma_p->acc_handle = NULL;
2429 		HPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
2430 	}
2431 
2432 	dma_p->kaddrp = NULL;
	dma_p->alength = 0;
2434 }
2435 
2436 /*
2437  *	hxge_m_start() -- start transmitting and receiving.
2438  *
 *	This function is called by the MAC layer when the first
 *	stream is opened, to prepare the hardware for transmitting
 *	and receiving packets.
2442  */
2443 static int
2444 hxge_m_start(void *arg)
2445 {
2446 	p_hxge_t hxgep = (p_hxge_t)arg;
2447 
2448 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_start"));
2449 
2450 	MUTEX_ENTER(hxgep->genlock);
2451 
2452 	if (hxge_init(hxgep) != DDI_SUCCESS) {
2453 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2454 		    "<== hxge_m_start: initialization failed"));
2455 		MUTEX_EXIT(hxgep->genlock);
2456 		return (EIO);
2457 	}
2458 
2459 	if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) {
2460 		/*
		 * Start the timer to check for system errors and tx hangs.
2462 		 */
2463 		hxgep->hxge_timerid = hxge_start_timer(hxgep,
2464 		    hxge_check_hw_state, HXGE_CHECK_TIMER);
2465 
2466 		hxgep->hxge_mac_state = HXGE_MAC_STARTED;
2467 
2468 		hxgep->timeout.link_status = 0;
2469 		hxgep->timeout.report_link_status = B_TRUE;
2470 		hxgep->timeout.ticks = drv_usectohz(2 * 1000000);
2471 
2472 		/* Start the link status timer to check the link status */
2473 		MUTEX_ENTER(&hxgep->timeout.lock);
2474 		hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
2475 		    hxgep->timeout.ticks);
2476 		MUTEX_EXIT(&hxgep->timeout.lock);
2477 	}
2478 
2479 	MUTEX_EXIT(hxgep->genlock);
2480 
2481 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_start"));
2482 
2483 	return (0);
2484 }
2485 
2486 /*
2487  * hxge_m_stop(): stop transmitting and receiving.
2488  */
2489 static void
2490 hxge_m_stop(void *arg)
2491 {
2492 	p_hxge_t hxgep = (p_hxge_t)arg;
2493 
2494 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_stop"));
2495 
2496 	if (hxgep->hxge_timerid) {
2497 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
2498 		hxgep->hxge_timerid = 0;
2499 	}
2500 
2501 	/* Stop the link status timer before unregistering */
2502 	MUTEX_ENTER(&hxgep->timeout.lock);
2503 	if (hxgep->timeout.id) {
2504 		(void) untimeout(hxgep->timeout.id);
2505 		hxgep->timeout.id = 0;
2506 	}
2507 	hxge_link_update(hxgep, LINK_STATE_DOWN);
2508 	MUTEX_EXIT(&hxgep->timeout.lock);
2509 
2510 	MUTEX_ENTER(hxgep->genlock);
2511 
2512 	hxge_uninit(hxgep);
2513 
2514 	hxgep->hxge_mac_state = HXGE_MAC_STOPPED;
2515 
2516 	MUTEX_EXIT(hxgep->genlock);
2517 
2518 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_stop"));
2519 }
2520 
2521 static int
2522 hxge_m_unicst(void *arg, const uint8_t *macaddr)
2523 {
2524 	p_hxge_t		hxgep = (p_hxge_t)arg;
2525 	struct ether_addr	addrp;
2526 	hxge_status_t		status;
2527 
2528 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_unicst"));
2529 
2530 	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
2531 
2532 	status = hxge_set_mac_addr(hxgep, &addrp);
2533 	if (status != HXGE_OK) {
2534 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_m_unicst: set unicast failed"));
2536 		return (EINVAL);
2537 	}
2538 
2539 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_unicst"));
2540 
2541 	return (0);
2542 }
2543 
2544 static int
2545 hxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
2546 {
2547 	p_hxge_t		hxgep = (p_hxge_t)arg;
2548 	struct ether_addr	addrp;
2549 
2550 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_multicst: add %d", add));
2551 
2552 	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
2553 
2554 	if (add) {
2555 		if (hxge_add_mcast_addr(hxgep, &addrp)) {
2556 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2557 			    "<== hxge_m_multicst: add multicast failed"));
2558 			return (EINVAL);
2559 		}
2560 	} else {
2561 		if (hxge_del_mcast_addr(hxgep, &addrp)) {
2562 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2563 			    "<== hxge_m_multicst: del multicast failed"));
2564 			return (EINVAL);
2565 		}
2566 	}
2567 
2568 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_multicst"));
2569 
2570 	return (0);
2571 }
2572 
2573 static int
2574 hxge_m_promisc(void *arg, boolean_t on)
2575 {
2576 	p_hxge_t hxgep = (p_hxge_t)arg;
2577 
2578 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_promisc: on %d", on));
2579 
2580 	if (hxge_set_promisc(hxgep, on)) {
2581 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2582 		    "<== hxge_m_promisc: set promisc failed"));
2583 		return (EINVAL);
2584 	}
2585 
2586 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_promisc: on %d", on));
2587 
2588 	return (0);
2589 }
2590 
2591 static void
2592 hxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2593 {
2594 	p_hxge_t	hxgep = (p_hxge_t)arg;
2595 	struct iocblk	*iocp = (struct iocblk *)mp->b_rptr;
2596 	boolean_t	need_privilege;
2597 	int		err;
2598 	int		cmd;
2599 
2600 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl"));
2601 
2603 	iocp->ioc_error = 0;
2604 	need_privilege = B_TRUE;
2605 	cmd = iocp->ioc_cmd;
2606 
2607 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl: cmd 0x%08x", cmd));
2608 	switch (cmd) {
2609 	default:
2610 		miocnak(wq, mp, 0, EINVAL);
2611 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl: invalid"));
2612 		return;
2613 
2614 	case LB_GET_INFO_SIZE:
2615 	case LB_GET_INFO:
2616 	case LB_GET_MODE:
2617 		need_privilege = B_FALSE;
2618 		break;
2619 
2620 	case LB_SET_MODE:
2621 		break;
2622 
2623 	case ND_GET:
2624 		need_privilege = B_FALSE;
2625 		break;
2626 	case ND_SET:
2627 		break;
2628 
2629 	case HXGE_GET64:
2630 	case HXGE_PUT64:
2631 	case HXGE_GET_TX_RING_SZ:
2632 	case HXGE_GET_TX_DESC:
2633 	case HXGE_TX_SIDE_RESET:
2634 	case HXGE_RX_SIDE_RESET:
2635 	case HXGE_GLOBAL_RESET:
2636 	case HXGE_RESET_MAC:
2637 	case HXGE_PUT_TCAM:
2638 	case HXGE_GET_TCAM:
2639 	case HXGE_RTRACE:
2640 
2641 		need_privilege = B_FALSE;
2642 		break;
2643 	}
2644 
2645 	if (need_privilege) {
2646 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
2647 		if (err != 0) {
2648 			miocnak(wq, mp, 0, err);
2649 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2650 			    "<== hxge_m_ioctl: no priv"));
2651 			return;
2652 		}
2653 	}
2654 
2655 	switch (cmd) {
2656 	case ND_GET:
2657 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_GET command"));
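		/* FALLTHROUGH */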
2658 	case ND_SET:
2659 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_SET command"));
2660 		hxge_param_ioctl(hxgep, wq, mp, iocp);
2661 		break;
2662 
2663 	case LB_GET_MODE:
2664 	case LB_SET_MODE:
2665 	case LB_GET_INFO_SIZE:
2666 	case LB_GET_INFO:
2667 		hxge_loopback_ioctl(hxgep, wq, mp, iocp);
2668 		break;
2669 
2670 	case HXGE_PUT_TCAM:
2671 	case HXGE_GET_TCAM:
2672 	case HXGE_GET64:
2673 	case HXGE_PUT64:
2674 	case HXGE_GET_TX_RING_SZ:
2675 	case HXGE_GET_TX_DESC:
2676 	case HXGE_TX_SIDE_RESET:
2677 	case HXGE_RX_SIDE_RESET:
2678 	case HXGE_GLOBAL_RESET:
2679 	case HXGE_RESET_MAC:
2680 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL,
2681 		    "==> hxge_m_ioctl: cmd 0x%x", cmd));
2682 		hxge_hw_ioctl(hxgep, wq, mp, iocp);
2683 		break;
2684 	}
2685 
2686 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl"));
2687 }
2688 
2689 extern void hxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);
2690 
2691 static void
2692 hxge_m_resources(void *arg)
2693 {
2694 	p_hxge_t hxgep = arg;
2695 	mac_rx_fifo_t mrf;
2696 	p_rx_rcr_rings_t rcr_rings;
2697 	p_rx_rcr_ring_t *rcr_p;
2698 	p_rx_rcr_ring_t rcrp;
2699 	uint32_t i, ndmas;
2700 	int status;
2701 
2702 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_m_resources"));
2703 
2704 	MUTEX_ENTER(hxgep->genlock);
2705 
2706 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2707 		status = hxge_init(hxgep);
2708 		if (status != HXGE_OK) {
2709 			HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_m_resources: "
2710 			    "hxge_init failed"));
2711 			MUTEX_EXIT(hxgep->genlock);
2712 			return;
2713 		}
2714 	}
2715 
2716 	mrf.mrf_type = MAC_RX_FIFO;
2717 	mrf.mrf_blank = hxge_rx_hw_blank;
2718 	mrf.mrf_arg = (void *)hxgep;
2719 
2720 	mrf.mrf_normal_blank_time = RXDMA_RCR_TO_DEFAULT;
2721 	mrf.mrf_normal_pkt_count = RXDMA_RCR_PTHRES_DEFAULT;
2722 
2723 	rcr_rings = hxgep->rx_rcr_rings;
2724 	rcr_p = rcr_rings->rcr_rings;
2725 	ndmas = rcr_rings->ndmas;
2726 
2727 	/*
2728 	 * Export our receive resources to the MAC layer.
2729 	 */
2730 	for (i = 0; i < ndmas; i++) {
2731 		rcrp = (void *)(p_rx_rcr_ring_t)rcr_p[i];
2732 		rcrp->rcr_mac_handle =
2733 		    mac_resource_add(hxgep->mach, (mac_resource_t *)&mrf);
2734 
2735 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
2736 		    "==> hxge_m_resources: vdma %d dma %d "
2737 		    "rcrptr 0x%016llx mac_handle 0x%016llx",
2738 		    i, rcrp->rdc, rcr_p[i], rcrp->rcr_mac_handle));
2739 	}
2740 
2741 	MUTEX_EXIT(hxgep->genlock);
2742 
2743 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_m_resources"));
2744 }
2745 
2746 /*
2747  * Set an alternate MAC address
2748  */
2749 static int
2750 hxge_altmac_set(p_hxge_t hxgep, uint8_t *maddr, mac_addr_slot_t slot)
2751 {
2752 	uint64_t	address;
2753 	uint64_t	tmp;
2754 	hpi_status_t	status;
2755 	uint8_t		addrn;
2756 	int		i;
2757 
2758 	/*
	 * Convert a byte array to a 48-bit value.
	 * Need to check endianness if in doubt.
2761 	 */
2762 	address = 0;
2763 	for (i = 0; i < ETHERADDRL; i++) {
2764 		tmp = maddr[i];
2765 		address <<= 8;
2766 		address |= tmp;
2767 	}
2768 
2769 	addrn = (uint8_t)slot;
2770 	status = hpi_pfc_set_mac_address(hxgep->hpi_handle, addrn, address);
2771 	if (status != HPI_SUCCESS)
2772 		return (EIO);
2773 
2774 	return (0);
2775 }
2776 
2777 static void
2778 hxge_mmac_kstat_update(p_hxge_t hxgep, mac_addr_slot_t slot)
2779 {
2780 	p_hxge_mmac_stats_t	mmac_stats;
2781 	int			i;
2782 	hxge_mmac_t		*mmac_info;
2783 
2784 	mmac_info = &hxgep->hxge_mmac_info;
2785 	mmac_stats = &hxgep->statsp->mmac_stats;
2786 	mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
2787 	mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;
2788 
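	/* Copy the address octets into the kstat in reverse order. */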
2789 	for (i = 0; i < ETHERADDRL; i++) {
2790 		mmac_stats->mmac_avail_pool[slot].ether_addr_octet[i] =
2791 		    mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i];
2792 	}
2793 }
2794 
2795 /*
 * Find an unused address slot, set it to the address specified, and
 * enable the port to start filtering on the new MAC address.
2798  * Returns: 0 on success.
2799  */
2800 int
2801 hxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr)
2802 {
2803 	p_hxge_t	hxgep = arg;
2804 	mac_addr_slot_t	slot;
2805 	hxge_mmac_t	*mmac_info;
2806 	int		err;
2807 	hxge_status_t	status;
2808 
2809 	mutex_enter(hxgep->genlock);
2810 
2811 	/*
	 * Make sure that hxge is initialized if _start() has
2813 	 * not been called.
2814 	 */
2815 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2816 		status = hxge_init(hxgep);
2817 		if (status != HXGE_OK) {
2818 			mutex_exit(hxgep->genlock);
2819 			return (ENXIO);
2820 		}
2821 	}
2822 
2823 	mmac_info = &hxgep->hxge_mmac_info;
2824 	if (mmac_info->naddrfree == 0) {
2825 		mutex_exit(hxgep->genlock);
2826 		return (ENOSPC);
2827 	}
2828 
2829 	if (!mac_unicst_verify(hxgep->mach, maddr->mma_addr,
2830 	    maddr->mma_addrlen)) {
2831 		mutex_exit(hxgep->genlock);
2832 		return (EINVAL);
2833 	}
2834 
2835 	/*
2836 	 * Search for the first available slot. Because naddrfree
2837 	 * is not zero, we are guaranteed to find one.
2838 	 * Slot 0 is for unique (primary) MAC.  The first alternate
2839 	 * MAC slot is slot 1.
2840 	 */
2841 	for (slot = 1; slot < mmac_info->num_mmac; slot++) {
2842 		if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
2843 			break;
2844 	}
2845 
2846 	ASSERT(slot < mmac_info->num_mmac);
2847 	if ((err = hxge_altmac_set(hxgep, maddr->mma_addr, slot)) != 0) {
2848 		mutex_exit(hxgep->genlock);
2849 		return (err);
2850 	}
2851 	bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
2852 	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
2853 	mmac_info->naddrfree--;
2854 	hxge_mmac_kstat_update(hxgep, slot);
2855 
2856 	maddr->mma_slot = slot;
2857 
2858 	mutex_exit(hxgep->genlock);
2859 	return (0);
2860 }
2861 
2862 /*
 * Remove the specified MAC address and update the hardware
 * so that it no longer filters on that address.
2865  * Returns: 0, on success.
2866  */
2867 int
2868 hxge_m_mmac_remove(void *arg, mac_addr_slot_t slot)
2869 {
2870 	p_hxge_t	hxgep = arg;
2871 	hxge_mmac_t	*mmac_info;
2872 	int		err = 0;
2873 	hxge_status_t	status;
2874 
2875 	mutex_enter(hxgep->genlock);
2876 
2877 	/*
	 * Make sure that hxge is initialized if _start() has
2879 	 * not been called.
2880 	 */
2881 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2882 		status = hxge_init(hxgep);
2883 		if (status != HXGE_OK) {
2884 			mutex_exit(hxgep->genlock);
2885 			return (ENXIO);
2886 		}
2887 	}
2888 
2889 	mmac_info = &hxgep->hxge_mmac_info;
2890 	if (slot <= 0 || slot >= mmac_info->num_mmac) {
2891 		mutex_exit(hxgep->genlock);
2892 		return (EINVAL);
2893 	}
2894 
2895 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
2896 		if (hpi_pfc_mac_addr_disable(hxgep->hpi_handle, slot) ==
2897 		    HPI_SUCCESS) {
2898 			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
2899 			mmac_info->naddrfree++;
2900 			/*
2901 			 * Clear mac_pool[slot].addr so that kstat shows 0
2902 			 * alternate MAC address if the slot is not used.
2903 			 */
2904 			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
2905 			hxge_mmac_kstat_update(hxgep, slot);
2906 		} else {
2907 			err = EIO;
2908 		}
2909 	} else {
2910 		err = EINVAL;
2911 	}
2912 
2913 	mutex_exit(hxgep->genlock);
2914 	return (err);
2915 }
2916 
2917 /*
 * Modify a MAC address added by hxge_m_mmac_add().
2919  * Returns: 0, on success.
2920  */
2921 int
2922 hxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr)
2923 {
2924 	p_hxge_t	hxgep = arg;
2925 	mac_addr_slot_t	slot;
2926 	hxge_mmac_t	*mmac_info;
2927 	int		err = 0;
2928 	hxge_status_t	status;
2929 
2930 	if (!mac_unicst_verify(hxgep->mach, maddr->mma_addr,
2931 	    maddr->mma_addrlen))
2932 		return (EINVAL);
2933 
2934 	slot = maddr->mma_slot;
2935 
2936 	mutex_enter(hxgep->genlock);
2937 
2938 	/*
	 * Make sure that hxge is initialized if _start() has
2940 	 * not been called.
2941 	 */
2942 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2943 		status = hxge_init(hxgep);
2944 		if (status != HXGE_OK) {
2945 			mutex_exit(hxgep->genlock);
2946 			return (ENXIO);
2947 		}
2948 	}
2949 
2950 	mmac_info = &hxgep->hxge_mmac_info;
2951 	if (slot <= 0 || slot >= mmac_info->num_mmac) {
2952 		mutex_exit(hxgep->genlock);
2953 		return (EINVAL);
2954 	}
2955 
2956 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
2957 		if ((err = hxge_altmac_set(hxgep, maddr->mma_addr,
2958 		    slot)) == 0) {
2959 			bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr,
2960 			    ETHERADDRL);
2961 			hxge_mmac_kstat_update(hxgep, slot);
2962 		}
2963 	} else {
2964 		err = EINVAL;
2965 	}
2966 
2967 	mutex_exit(hxgep->genlock);
2968 	return (err);
2969 }
2970 
2971 /*
 * int
 * hxge_m_mmac_get() - Get the MAC address and other information
2974  *	related to the slot.  mma_flags should be set to 0 in the call.
2975  *	Note: although kstat shows MAC address as zero when a slot is
2976  *	not used, Crossbow expects hxge_m_mmac_get to copy factory MAC
2977  *	to the caller as long as the slot is not using a user MAC address.
2978  *	The following table shows the rules,
2979  *
 *					USED	VENDOR	mma_addr
2981  *	------------------------------------------------------------
2982  *	(1) Slot uses a user MAC:	yes      no     user MAC
2983  *	(2) Slot uses a factory MAC:    yes      yes    factory MAC
2984  *	(3) Slot is not used but is
2985  *	     factory MAC capable:	no       yes    factory MAC
2986  *	(4) Slot is not used and is
2987  *	     not factory MAC capable:   no       no	0
2988  *	------------------------------------------------------------
2989  */
2990 int
2991 hxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr)
2992 {
2993 	hxge_t		*hxgep = arg;
2994 	mac_addr_slot_t	slot;
2995 	hxge_mmac_t	*mmac_info;
2996 	hxge_status_t	status;
2997 
2998 	slot = maddr->mma_slot;
2999 
3000 	mutex_enter(hxgep->genlock);
3001 
3002 	/*
	 * Make sure that hxge is initialized if _start() has
3004 	 * not been called.
3005 	 */
3006 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
3007 		status = hxge_init(hxgep);
3008 		if (status != HXGE_OK) {
3009 			mutex_exit(hxgep->genlock);
3010 			return (ENXIO);
3011 		}
3012 	}
3013 
3014 	mmac_info = &hxgep->hxge_mmac_info;
3015 	if (slot <= 0 || slot >= mmac_info->num_mmac) {
3016 		mutex_exit(hxgep->genlock);
3017 		return (EINVAL);
3018 	}
3019 
3020 	maddr->mma_flags = 0;
3021 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
3022 		maddr->mma_flags |= MMAC_SLOT_USED;
3023 		bcopy(mmac_info->mac_pool[slot].addr,
3024 		    maddr->mma_addr, ETHERADDRL);
3025 		maddr->mma_addrlen = ETHERADDRL;
3026 	}
3027 
3028 	mutex_exit(hxgep->genlock);
3029 	return (0);
3030 }
3031 
3032 /*ARGSUSED*/
3033 boolean_t
3034 hxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
3035 {
3036 	p_hxge_t		hxgep = (p_hxge_t)arg;
3037 	uint32_t		*txflags = cap_data;
3038 	multiaddress_capab_t	*mmacp = cap_data;
3039 
3040 	switch (cap) {
3041 	case MAC_CAPAB_HCKSUM:
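		/*
		 * Hardware completes a partial one's-complement checksum
		 * computed by the stack (HCKSUM_INET_PARTIAL).
		 */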
3042 		*txflags = HCKSUM_INET_PARTIAL;
3043 		break;
3044 
3045 	case MAC_CAPAB_POLL:
3046 		/*
		 * There's nothing for us to fill in; simply returning
		 * B_TRUE to state that we support polling is sufficient.
3049 		 */
3050 		break;
3051 
3052 	case MAC_CAPAB_MULTIADDRESS:
3053 		/*
3054 		 * The number of MAC addresses made available by
3055 		 * this capability is one less than the total as
3056 		 * the primary address in slot 0 is counted in
3057 		 * the total.
3058 		 */
3059 		mmacp->maddr_naddr = PFC_N_MAC_ADDRESSES - 1;
3060 		mmacp->maddr_naddrfree = hxgep->hxge_mmac_info.naddrfree;
3061 		mmacp->maddr_flag = 0;	/* No multiple factory macs */
3062 		mmacp->maddr_handle = hxgep;
3063 		mmacp->maddr_add = hxge_m_mmac_add;
3064 		mmacp->maddr_remove = hxge_m_mmac_remove;
3065 		mmacp->maddr_modify = hxge_m_mmac_modify;
3066 		mmacp->maddr_get = hxge_m_mmac_get;
3067 		mmacp->maddr_reserve = NULL;	/* No multiple factory macs */
3068 		break;
3069 	default:
3070 		return (B_FALSE);
3071 	}
3072 	return (B_TRUE);
3073 }
3074 
3075 static boolean_t
3076 hxge_param_locked(mac_prop_id_t pr_num)
3077 {
3078 	/*
3079 	 * All adv_* parameters are locked (read-only) while
3080 	 * the device is in any sort of loopback mode ...
3081 	 */
3082 	switch (pr_num) {
3083 		case MAC_PROP_ADV_1000FDX_CAP:
3084 		case MAC_PROP_EN_1000FDX_CAP:
3085 		case MAC_PROP_ADV_1000HDX_CAP:
3086 		case MAC_PROP_EN_1000HDX_CAP:
3087 		case MAC_PROP_ADV_100FDX_CAP:
3088 		case MAC_PROP_EN_100FDX_CAP:
3089 		case MAC_PROP_ADV_100HDX_CAP:
3090 		case MAC_PROP_EN_100HDX_CAP:
3091 		case MAC_PROP_ADV_10FDX_CAP:
3092 		case MAC_PROP_EN_10FDX_CAP:
3093 		case MAC_PROP_ADV_10HDX_CAP:
3094 		case MAC_PROP_EN_10HDX_CAP:
3095 		case MAC_PROP_AUTONEG:
3096 		case MAC_PROP_FLOWCTRL:
3097 			return (B_TRUE);
3098 	}
3099 	return (B_FALSE);
3100 }
3101 
3102 /*
3103  * callback functions for set/get of properties
3104  */
3105 static int
3106 hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3107     uint_t pr_valsize, const void *pr_val)
3108 {
3109 	hxge_t		*hxgep = barg;
3110 	p_hxge_stats_t	statsp;
3111 	int		err = 0;
3112 	uint32_t	new_mtu, old_framesize, new_framesize;
3113 
3114 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "==> hxge_m_setprop"));
3115 
3116 	statsp = hxgep->statsp;
3117 	mutex_enter(hxgep->genlock);
3118 	if (statsp->port_stats.lb_mode != hxge_lb_normal &&
3119 	    hxge_param_locked(pr_num)) {
3120 		/*
3121 		 * All adv_* parameters are locked (read-only)
3122 		 * while the device is in any sort of loopback mode.
3123 		 */
3124 		HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3125 		    "==> hxge_m_setprop: loopback mode: read only"));
3126 		mutex_exit(hxgep->genlock);
3127 		return (EBUSY);
3128 	}
3129 
3130 	switch (pr_num) {
3131 		/*
		 * These properties either do not exist or are read-only.
3133 		 */
3134 		case MAC_PROP_EN_1000FDX_CAP:
3135 		case MAC_PROP_EN_100FDX_CAP:
3136 		case MAC_PROP_EN_10FDX_CAP:
3137 		case MAC_PROP_EN_1000HDX_CAP:
3138 		case MAC_PROP_EN_100HDX_CAP:
3139 		case MAC_PROP_EN_10HDX_CAP:
3140 		case MAC_PROP_ADV_1000FDX_CAP:
3141 		case MAC_PROP_ADV_1000HDX_CAP:
3142 		case MAC_PROP_ADV_100FDX_CAP:
3143 		case MAC_PROP_ADV_100HDX_CAP:
3144 		case MAC_PROP_ADV_10FDX_CAP:
3145 		case MAC_PROP_ADV_10HDX_CAP:
3146 		case MAC_PROP_STATUS:
3147 		case MAC_PROP_SPEED:
3148 		case MAC_PROP_DUPLEX:
3149 		case MAC_PROP_AUTONEG:
3150 		/*
3151 		 * Flow control is handled in the shared domain and
		 * it is read-only here.
3153 		 */
3154 		case MAC_PROP_FLOWCTRL:
3155 			err = EINVAL;
3156 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
			    "==> hxge_m_setprop: read-only property %d",
3158 			    pr_num));
3159 			break;
3160 
3161 		case MAC_PROP_MTU:
3162 			if (hxgep->hxge_mac_state == HXGE_MAC_STARTED) {
3163 				err = EBUSY;
3164 				break;
3165 			}
3166 
3167 			bcopy(pr_val, &new_mtu, sizeof (new_mtu));
3168 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3169 			    "==> hxge_m_setprop: set MTU: %d", new_mtu));
3170 
3171 			new_framesize = new_mtu + MTU_TO_FRAME_SIZE;
3172 			if (new_framesize == hxgep->vmac.maxframesize) {
3173 				err = 0;
3174 				break;
3175 			}
3176 
3177 			if (new_framesize < MIN_FRAME_SIZE ||
3178 			    new_framesize > MAX_FRAME_SIZE) {
3179 				err = EINVAL;
3180 				break;
3181 			}
3182 
3183 			old_framesize = hxgep->vmac.maxframesize;
3184 			hxgep->vmac.maxframesize = (uint16_t)new_framesize;
3185 
3186 			if (hxge_vmac_set_framesize(hxgep)) {
3187 				hxgep->vmac.maxframesize =
3188 				    (uint16_t)old_framesize;
3189 				err = EINVAL;
3190 				break;
3191 			}
3192 
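			/*
			 * If the MAC layer rejects the new SDU, restore
			 * the previous frame size in hardware.
			 */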
3193 			err = mac_maxsdu_update(hxgep->mach, new_mtu);
3194 			if (err) {
3195 				hxgep->vmac.maxframesize =
3196 				    (uint16_t)old_framesize;
3197 				(void) hxge_vmac_set_framesize(hxgep);
3198 			}
3199 
3200 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3201 			    "==> hxge_m_setprop: set MTU: %d maxframe %d",
3202 			    new_mtu, hxgep->vmac.maxframesize));
3203 			break;
3204 
3205 		case MAC_PROP_PRIVATE:
3206 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3207 			    "==> hxge_m_setprop: private property"));
3208 			err = hxge_set_priv_prop(hxgep, pr_name, pr_valsize,
3209 			    pr_val);
3210 			break;
3211 
3212 		default:
3213 			err = ENOTSUP;
3214 			break;
3215 	}
3216 
3217 	mutex_exit(hxgep->genlock);
3218 
3219 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3220 	    "<== hxge_m_setprop (return %d)", err));
3221 
3222 	return (err);
3223 }
3224 
3225 /* ARGSUSED */
3226 static int
3227 hxge_get_def_val(hxge_t *hxgep, mac_prop_id_t pr_num, uint_t pr_valsize,
3228     void *pr_val)
3229 {
3230 	int		err = 0;
3231 	link_flowctrl_t	fl;
3232 
3233 	switch (pr_num) {
3234 	case MAC_PROP_DUPLEX:
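		/* Full duplex (LINK_DUPLEX_FULL) */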
3235 		*(uint8_t *)pr_val = 2;
3236 		break;
3237 	case MAC_PROP_AUTONEG:
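		/* 10 Gb link only; autonegotiation is not supported. */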
3238 		*(uint8_t *)pr_val = 0;
3239 		break;
3240 	case MAC_PROP_FLOWCTRL:
3241 		if (pr_valsize < sizeof (link_flowctrl_t))
3242 			return (EINVAL);
3243 		fl = LINK_FLOWCTRL_TX;
3244 		bcopy(&fl, pr_val, sizeof (fl));
3245 		break;
3246 	default:
3247 		err = ENOTSUP;
3248 		break;
3249 	}
3250 	return (err);
3251 }
3252 
3253 static int
3254 hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3255     uint_t pr_flags, uint_t pr_valsize, void *pr_val)
3256 {
3257 	hxge_t 		*hxgep = barg;
3258 	p_hxge_stats_t	statsp = hxgep->statsp;
3259 	int		err = 0;
3260 	link_flowctrl_t fl;
3261 	uint64_t	tmp = 0;
3262 	link_state_t	ls;
3263 
3264 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3265 	    "==> hxge_m_getprop: pr_num %d", pr_num));
3266 
3267 	if (pr_valsize == 0)
3268 		return (EINVAL);
3269 
3270 	if ((pr_flags & MAC_PROP_DEFAULT) && (pr_num != MAC_PROP_PRIVATE)) {
3271 		err = hxge_get_def_val(hxgep, pr_num, pr_valsize, pr_val);
3272 		return (err);
3273 	}
3274 
3275 	bzero(pr_val, pr_valsize);
3276 	switch (pr_num) {
3277 		case MAC_PROP_DUPLEX:
3278 			*(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
3279 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3280 			    "==> hxge_m_getprop: duplex mode %d",
3281 			    *(uint8_t *)pr_val));
3282 			break;
3283 
3284 		case MAC_PROP_SPEED:
3285 			if (pr_valsize < sizeof (uint64_t))
3286 				return (EINVAL);
3287 			tmp = statsp->mac_stats.link_speed * 1000000ull;
3288 			bcopy(&tmp, pr_val, sizeof (tmp));
3289 			break;
3290 
3291 		case MAC_PROP_STATUS:
3292 			if (pr_valsize < sizeof (link_state_t))
3293 				return (EINVAL);
3294 			if (!statsp->mac_stats.link_up)
3295 				ls = LINK_STATE_DOWN;
3296 			else
3297 				ls = LINK_STATE_UP;
3298 			bcopy(&ls, pr_val, sizeof (ls));
3299 			break;
3300 
3301 		case MAC_PROP_FLOWCTRL:
3302 			/*
3303 			 * Flow control is supported by the shared domain and
			 * it is currently transmit-only.
3305 			 */
3306 			if (pr_valsize < sizeof (link_flowctrl_t))
3307 				return (EINVAL);
3308 			fl = LINK_FLOWCTRL_TX;
3309 			bcopy(&fl, pr_val, sizeof (fl));
3310 			break;
3311 		case MAC_PROP_AUTONEG:
3312 			/* 10G link only and it is not negotiable */
3313 			*(uint8_t *)pr_val = 0;
3314 			break;
3315 		case MAC_PROP_ADV_1000FDX_CAP:
3316 		case MAC_PROP_ADV_100FDX_CAP:
3317 		case MAC_PROP_ADV_10FDX_CAP:
3318 		case MAC_PROP_ADV_1000HDX_CAP:
3319 		case MAC_PROP_ADV_100HDX_CAP:
3320 		case MAC_PROP_ADV_10HDX_CAP:
3321 		case MAC_PROP_EN_1000FDX_CAP:
3322 		case MAC_PROP_EN_100FDX_CAP:
3323 		case MAC_PROP_EN_10FDX_CAP:
3324 		case MAC_PROP_EN_1000HDX_CAP:
3325 		case MAC_PROP_EN_100HDX_CAP:
3326 		case MAC_PROP_EN_10HDX_CAP:
3327 			err = ENOTSUP;
3328 			break;
3329 
3330 		case MAC_PROP_PRIVATE:
3331 			err = hxge_get_priv_prop(hxgep, pr_name, pr_flags,
3332 			    pr_valsize, pr_val);
3333 			break;
3334 		default:
3335 			err = EINVAL;
3336 			break;
3337 	}
3338 
3339 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "<== hxge_m_getprop"));
3340 
3341 	return (err);
3342 }
3343 
3344 /* ARGSUSED */
3345 static int
3346 hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_valsize,
3347     const void *pr_val)
3348 {
3349 	p_hxge_param_t	param_arr = hxgep->param_arr;
3350 	int		err = 0;
3351 
3352 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3353 	    "==> hxge_set_priv_prop: name %s (value %s)", pr_name, pr_val));
3354 
3355 	if (pr_val == NULL) {
3356 		return (EINVAL);
3357 	}
3358 
3359 	/* Blanking */
3360 	if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3361 		err = hxge_param_rx_intr_time(hxgep, NULL, NULL,
3362 		    (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_time]);
3363 	} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3364 		err = hxge_param_rx_intr_pkts(hxgep, NULL, NULL,
3365 		    (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_pkts]);
3366 
3367 	/* Classification */
3368 	} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3369 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3370 		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3371 	} else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3372 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3373 		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3374 	} else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3375 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3376 		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3377 	} else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3378 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3379 		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3380 	} else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3381 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3382 		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3383 	} else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3384 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3385 		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3386 	} else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3387 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3388 		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3389 	} else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3390 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3391 		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3392 	} else {
3393 		err = EINVAL;
3394 	}
3395 
3396 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3397 	    "<== hxge_set_priv_prop: err %d", err));
3398 
3399 	return (err);
3400 }
3401 
3402 static int
3403 hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_flags,
3404     uint_t pr_valsize, void *pr_val)
3405 {
3406 	p_hxge_param_t	param_arr = hxgep->param_arr;
3407 	char		valstr[MAXNAMELEN];
3408 	int		err = 0;
3409 	uint_t		strsize;
3410 	int		value = 0;
3411 
3412 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3413 	    "==> hxge_get_priv_prop: property %s", pr_name));
3414 
3415 	if (pr_flags & MAC_PROP_DEFAULT) {
3416 		/* Receive Interrupt Blanking Parameters */
3417 		if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3418 			value = RXDMA_RCR_TO_DEFAULT;
3419 		} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3420 			value = RXDMA_RCR_PTHRES_DEFAULT;
3421 
3422 		/* Classification and Load Distribution Configuration */
3423 		} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0 ||
3424 		    strcmp(pr_name, "_class_opt_ipv4_udp") == 0 ||
3425 		    strcmp(pr_name, "_class_opt_ipv4_ah") == 0 ||
3426 		    strcmp(pr_name, "_class_opt_ipv4_sctp") == 0 ||
3427 		    strcmp(pr_name, "_class_opt_ipv6_tcp") == 0 ||
3428 		    strcmp(pr_name, "_class_opt_ipv6_udp") == 0 ||
3429 		    strcmp(pr_name, "_class_opt_ipv6_ah") == 0 ||
3430 		    strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3431 			value = HXGE_CLASS_TCAM_LOOKUP;
3432 		} else {
3433 			err = EINVAL;
3434 		}
3435 	} else {
3436 		/* Receive Interrupt Blanking Parameters */
3437 		if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3438 			value = hxgep->intr_timeout;
3439 		} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3440 			value = hxgep->intr_threshold;
3441 
3442 		/* Classification and Load Distribution Configuration */
3443 		} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3444 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3445 			    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3446 
3447 			value = (int)param_arr[param_class_opt_ipv4_tcp].value;
3448 		} else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3449 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3450 			    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3451 
3452 			value = (int)param_arr[param_class_opt_ipv4_udp].value;
3453 		} else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3454 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3455 			    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3456 
3457 			value = (int)param_arr[param_class_opt_ipv4_ah].value;
3458 		} else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3459 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3460 			    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3461 
3462 			value = (int)param_arr[param_class_opt_ipv4_sctp].value;
3463 		} else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3464 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3465 			    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3466 
3467 			value = (int)param_arr[param_class_opt_ipv6_tcp].value;
3468 		} else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3469 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3470 			    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3471 
3472 			value = (int)param_arr[param_class_opt_ipv6_udp].value;
3473 		} else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3474 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3475 			    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3476 
3477 			value = (int)param_arr[param_class_opt_ipv6_ah].value;
3478 		} else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3479 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3480 			    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3481 
3482 			value = (int)param_arr[param_class_opt_ipv6_sctp].value;
3483 		} else {
3484 			err = EINVAL;
3485 		}
3486 	}
3487 
3488 	if (err == 0) {
3489 		(void) snprintf(valstr, sizeof (valstr), "0x%x", value);
3490 
3491 		strsize = (uint_t)strlen(valstr);
3492 		if (pr_valsize < strsize) {
3493 			err = ENOBUFS;
3494 		} else {
3495 			(void) strlcpy(pr_val, valstr, pr_valsize);
3496 		}
3497 	}
3498 
3499 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3500 	    "<== hxge_get_priv_prop: return %d", err));
3501 
3502 	return (err);
3503 }

/*
3505  * Module loading and removing entry points.
3506  */
3507 DDI_DEFINE_STREAM_OPS(hxge_dev_ops, nulldev, nulldev, hxge_attach, hxge_detach,
3508     nodev, NULL, D_MP, NULL, NULL);
3509 
3510 extern struct mod_ops mod_driverops;
3511 
3512 #define	HXGE_DESC_VER	"HXGE 10Gb Ethernet Driver"
3513 
3514 /*
3515  * Module linkage information for the kernel.
3516  */
3517 static struct modldrv hxge_modldrv = {
3518 	&mod_driverops,
3519 	HXGE_DESC_VER,
3520 	&hxge_dev_ops
3521 };
3522 
3523 static struct modlinkage modlinkage = {
3524 	MODREV_1, (void *) &hxge_modldrv, NULL
3525 };
3526 
3527 int
3528 _init(void)
3529 {
3530 	int status;
3531 
3532 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
3533 	mac_init_ops(&hxge_dev_ops, "hxge");
3534 	status = ddi_soft_state_init(&hxge_list, sizeof (hxge_t), 0);
3535 	if (status != 0) {
3536 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
3537 		    "failed to init device soft state"));
3538 		mac_fini_ops(&hxge_dev_ops);
3539 		goto _init_exit;
3540 	}
3541 
3542 	status = mod_install(&modlinkage);
3543 	if (status != 0) {
3544 		ddi_soft_state_fini(&hxge_list);
3545 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "Mod install failed"));
3546 		goto _init_exit;
3547 	}
3548 
3549 	MUTEX_INIT(&hxge_common_lock, NULL, MUTEX_DRIVER, NULL);
3550 
3551 _init_exit:
3552 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
3553 
3554 	return (status);
3555 }
3556 
3557 int
3558 _fini(void)
3559 {
3560 	int status;
3561 
3562 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
3563 
3564 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
3565 
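	/* Refuse to unload while driver-owned mblks are still outstanding. */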
3566 	if (hxge_mblks_pending)
3567 		return (EBUSY);
3568 
3569 	status = mod_remove(&modlinkage);
3570 	if (status != DDI_SUCCESS) {
3571 		HXGE_DEBUG_MSG((NULL, MOD_CTL,
3572 		    "Module removal failed 0x%08x", status));
3573 		goto _fini_exit;
3574 	}
3575 
3576 	mac_fini_ops(&hxge_dev_ops);
3577 
3578 	ddi_soft_state_fini(&hxge_list);
3579 
3580 	MUTEX_DESTROY(&hxge_common_lock);
3581 
3582 _fini_exit:
3583 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));
3584 
3585 	return (status);
3586 }
3587 
3588 int
3589 _info(struct modinfo *modinfop)
3590 {
3591 	int status;
3592 
3593 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
3594 	status = mod_info(&modlinkage, modinfop);
3595 	HXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
3596 
3597 	return (status);
3598 }
3599 
3600 /*ARGSUSED*/
3601 hxge_status_t
3602 hxge_add_intrs(p_hxge_t hxgep)
3603 {
3604 	int		intr_types;
3605 	int		type = 0;
3606 	int		ddi_status = DDI_SUCCESS;
3607 	hxge_status_t	status = HXGE_OK;
3608 
3609 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs"));
3610 
3611 	hxgep->hxge_intr_type.intr_registered = B_FALSE;
3612 	hxgep->hxge_intr_type.intr_enabled = B_FALSE;
3613 	hxgep->hxge_intr_type.msi_intx_cnt = 0;
3614 	hxgep->hxge_intr_type.intr_added = 0;
3615 	hxgep->hxge_intr_type.niu_msi_enable = B_FALSE;
3616 	hxgep->hxge_intr_type.intr_type = 0;
3617 
3618 	if (hxge_msi_enable) {
3619 		hxgep->hxge_intr_type.niu_msi_enable = B_TRUE;
3620 	}
3621 
3622 	/* Get the supported interrupt types */
3623 	if ((ddi_status = ddi_intr_get_supported_types(hxgep->dip, &intr_types))
3624 	    != DDI_SUCCESS) {
3625 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_intrs: "
3626 		    "ddi_intr_get_supported_types failed: status 0x%08x",
3627 		    ddi_status));
3628 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3629 	}
3630 
3631 	hxgep->hxge_intr_type.intr_types = intr_types;
3632 
3633 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3634 	    "ddi_intr_get_supported_types: 0x%08x", intr_types));
3635 
3636 	/*
3637 	 * Pick the interrupt type (MSI-X, MSI, or fixed/INTx) to use,
3638 	 * based on the hxge_msi_enable tunable:
3639 	 *	1 - prefer MSI, then MSI-X; 2 - prefer MSI-X, then MSI;
3640 	 *	others - FIXED (INTx emulation)
3641 	 */
3642 	switch (hxge_msi_enable) {
3643 	default:
3644 		type = DDI_INTR_TYPE_FIXED;
3645 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3646 		    "use fixed (intx emulation) type %08x", type));
3647 		break;
3648 
3649 	case 2:
3650 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3651 		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
3652 		if (intr_types & DDI_INTR_TYPE_MSIX) {
3653 			type = DDI_INTR_TYPE_MSIX;
3654 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3655 			    "==> hxge_add_intrs: "
3656 			    "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3657 		} else if (intr_types & DDI_INTR_TYPE_MSI) {
3658 			type = DDI_INTR_TYPE_MSI;
3659 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3660 			    "==> hxge_add_intrs: "
3661 			    "ddi_intr_get_supported_types: MSI 0x%08x", type));
3662 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
3663 			type = DDI_INTR_TYPE_FIXED;
3664 			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3665 			    "ddi_intr_get_supported_types: FIXED 0x%08x", type));
3666 		}
3667 		break;
3668 
3669 	case 1:
3670 		if (intr_types & DDI_INTR_TYPE_MSI) {
3671 			type = DDI_INTR_TYPE_MSI;
3672 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3673 			    "==> hxge_add_intrs: "
3674 			    "ddi_intr_get_supported_types: MSI 0x%08x", type));
3675 		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
3676 			type = DDI_INTR_TYPE_MSIX;
3677 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3678 			    "==> hxge_add_intrs: "
3679 			    "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3680 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
3681 			type = DDI_INTR_TYPE_FIXED;
3682 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3683 			    "==> hxge_add_intrs: "
3684 			    "ddi_intr_get_supported_types: FIXED 0x%08x", type));
3685 		}
3686 	}
3687 
3688 	hxgep->hxge_intr_type.intr_type = type;
3689 	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
3690 	    type == DDI_INTR_TYPE_FIXED) &&
3691 	    hxgep->hxge_intr_type.niu_msi_enable) {
3692 		if ((status = hxge_add_intrs_adv(hxgep)) != HXGE_OK) {
3693 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3694 			    " hxge_add_intrs: "
3695 			    " hxge_add_intrs_adv failed: status 0x%08x",
3696 			    status));
3697 			return (status);
3698 		} else {
3699 			HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_intrs: "
3700 			    "interrupts registered : type %d", type));
3701 			hxgep->hxge_intr_type.intr_registered = B_TRUE;
3702 
3703 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
3704 			    "\nAdded advanced hxge add_intr_adv "
3705 			    "intr type 0x%x\n", type));
3706 
3707 			return (status);
3708 		}
3709 	}
3710 
3711 	if (!hxgep->hxge_intr_type.intr_registered) {
3712 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3713 		    "==> hxge_add_intrs: failed to register interrupts"));
3714 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3715 	}
3716 
3717 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs"));
3718 
3719 	return (status);
3720 }
3721 
3722 /*ARGSUSED*/
3723 static hxge_status_t
3724 hxge_add_soft_intrs(p_hxge_t hxgep)
3725 {
3726 	int		ddi_status = DDI_SUCCESS;
3727 	hxge_status_t	status = HXGE_OK;
3728 
3729 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_soft_intrs"));
3730 
3731 	hxgep->resched_id = NULL;
3732 	hxgep->resched_running = B_FALSE;
3733 	ddi_status = ddi_add_softintr(hxgep->dip, DDI_SOFTINT_LOW,
3734 	    &hxgep->resched_id, NULL, NULL, hxge_reschedule, (caddr_t)hxgep);
3735 	if (ddi_status != DDI_SUCCESS) {
3736 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_soft_intrs: "
3737 		    "ddi_add_softintr failed: status 0x%08x", ddi_status));
3738 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3739 	}
3740 
3741 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_add_soft_intrs"));
3742 
3743 	return (status);
3744 }
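
/*
 * For reference, a sketch only (hxge_reschedule itself is defined
 * elsewhere in this file): a soft interrupt handler registered with
 * ddi_add_softintr() takes a single caddr_t argument and returns
 * DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED:
 *
 *	static uint_t
 *	hxge_reschedule(caddr_t arg)
 *	{
 *		p_hxge_t hxgep = (p_hxge_t)arg;
 *
 *		... restart transmit processing ...
 *		return (DDI_INTR_CLAIMED);
 *	}
 *
 * The handler is raised later with ddi_trigger_softintr(resched_id).
 */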
3745 
3746 /*ARGSUSED*/
3747 static hxge_status_t
3748 hxge_add_intrs_adv(p_hxge_t hxgep)
3749 {
3750 	int		intr_type;
3751 	p_hxge_intr_t	intrp;
3752 	hxge_status_t	status;
3753 
3754 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv"));
3755 
3756 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3757 	intr_type = intrp->intr_type;
3758 
3759 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv: type 0x%x",
3760 	    intr_type));
3761 
3762 	switch (intr_type) {
3763 	case DDI_INTR_TYPE_MSI:		/* 0x2 */
3764 	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
3765 		status = hxge_add_intrs_adv_type(hxgep, intr_type);
3766 		break;
3767 
3768 	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
3769 		status = hxge_add_intrs_adv_type_fix(hxgep, intr_type);
3770 		break;
3771 
3772 	default:
3773 		status = HXGE_ERROR;
3774 		break;
3775 	}
3776 
3777 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv"));
3778 
3779 	return (status);
3780 }
3781 
3782 /*ARGSUSED*/
3783 static hxge_status_t
3784 hxge_add_intrs_adv_type(p_hxge_t hxgep, uint32_t int_type)
3785 {
3786 	dev_info_t	*dip = hxgep->dip;
3787 	p_hxge_ldg_t	ldgp;
3788 	p_hxge_intr_t	intrp;
3789 	uint_t		*inthandler;
3790 	void		*arg1, *arg2;
3791 	int		behavior;
3792 	int		nintrs, navail;
3793 	int		nactual, nrequired;
3794 	int		inum = 0;
3795 	int		loop = 0;
3796 	int		x, y;
3797 	int		ddi_status = DDI_SUCCESS;
3798 	hxge_status_t	status = HXGE_OK;
3799 
3800 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type"));
3801 
3802 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3803 
3804 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3805 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3806 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3807 		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
3808 		    "nintrs: %d", ddi_status, nintrs));
3809 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3810 	}
3811 
3812 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
3813 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
3814 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3815 		    "ddi_intr_get_navail() failed, status: 0x%x, "
3816 		    "navail: %d", ddi_status, navail));
3817 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3818 	}
3819 
3820 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3821 	    "ddi_intr_get_navail() returned: intr type %d nintrs %d, navail %d",
3822 	    int_type, nintrs, navail));
3823 
3824 	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
3825 		/* MSI vector counts must be a power of 2; round navail down */
3826 		if ((navail & 16) == 16) {
3827 			navail = 16;
3828 		} else if ((navail & 8) == 8) {
3829 			navail = 8;
3830 		} else if ((navail & 4) == 4) {
3831 			navail = 4;
3832 		} else if ((navail & 2) == 2) {
3833 			navail = 2;
3834 		} else {
3835 			navail = 1;
3836 		}
3837 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3838 		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
3839 		    "navail %d", nintrs, navail));
3840 	}
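	/*
	 * Illustrative note: the bit tests above round navail down to
	 * the largest power of two <= 31, which is sufficient because
	 * MSI allows at most 32 vectors (and 32 itself passes the
	 * ISP2() test).  A generic equivalent (sketch only) is:
	 *
	 *	while (!ISP2(navail))
	 *		navail &= navail - 1;
	 *
	 * since repeatedly clearing the lowest set bit leaves only the
	 * highest one.
	 */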
3841 
3842 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3843 	    "requesting: intr type %d nintrs %d, navail %d",
3844 	    int_type, nintrs, navail));
3845 
3846 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
3847 	    DDI_INTR_ALLOC_NORMAL);
3848 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
3849 	intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP);
3850 
3851 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
3852 	    navail, &nactual, behavior);
3853 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
3854 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3855 		    " ddi_intr_alloc() failed: %d", ddi_status));
3856 		kmem_free(intrp->htable, intrp->intr_size);
3857 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3858 	}
3859 
3860 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3861 	    "ddi_intr_alloc() returned: navail %d nactual %d",
3862 	    navail, nactual));
3863 
3864 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
3865 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
3866 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3867 		    " ddi_intr_get_pri() failed: %d", ddi_status));
3868 		/* Free already allocated interrupts */
3869 		for (y = 0; y < nactual; y++) {
3870 			(void) ddi_intr_free(intrp->htable[y]);
3871 		}
3872 
3873 		kmem_free(intrp->htable, intrp->intr_size);
3874 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3875 	}
3876 
3877 	nrequired = 0;
3878 	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
3879 	if (status != HXGE_OK) {
3880 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3881 		    "hxge_add_intrs_adv_typ:hxge_ldgv_init "
3882 		    "failed: 0x%x", status));
3883 		/* Free already allocated interrupts */
3884 		for (y = 0; y < nactual; y++) {
3885 			(void) ddi_intr_free(intrp->htable[y]);
3886 		}
3887 
3888 		kmem_free(intrp->htable, intrp->intr_size);
3889 		return (status);
3890 	}
3891 
3892 	ldgp = hxgep->ldgvp->ldgp;
3893 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3894 	    "After hxge_ldgv_init(): nreq %d nactual %d", nrequired, nactual));
3895 
3896 	loop = MIN(nactual, nrequired);
3900 
3901 	for (x = 0; x < loop; x++, ldgp++) {
3902 		ldgp->vector = (uint8_t)x;
3903 		arg1 = ldgp->ldvp;
3904 		arg2 = hxgep;
3905 		if (ldgp->nldvs == 1) {
3906 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
3907 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3908 			    "hxge_add_intrs_adv_type: arg1 $%p arg2 $%p: "
3909 			    "1-1 int handler (entry %d)\n",
3910 			    arg1, arg2, x));
3911 		} else if (ldgp->nldvs > 1) {
3912 			inthandler = (uint_t *)ldgp->sys_intr_handler;
3913 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3914 			    "hxge_add_intrs_adv_type: arg1 $%p arg2 $%p: "
3915 			    "nldvs %d int handler (entry %d)\n",
3916 			    arg1, arg2, ldgp->nldvs, x));
3917 		}
3918 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3919 		    "==> hxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
3920 		    "htable 0x%llx", x, intrp->htable[x]));
3921 
3922 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
3923 		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
3924 		    DDI_SUCCESS) {
3925 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3926 			    "==> hxge_add_intrs_adv_type: failed #%d "
3927 			    "status 0x%x", x, ddi_status));
3928 			for (y = 0; y < intrp->intr_added; y++) {
3929 				(void) ddi_intr_remove_handler(
3930 				    intrp->htable[y]);
3931 			}
3932 
3933 			/* Free already allocated intr */
3934 			for (y = 0; y < nactual; y++) {
3935 				(void) ddi_intr_free(intrp->htable[y]);
3936 			}
3937 			kmem_free(intrp->htable, intrp->intr_size);
3938 
3939 			(void) hxge_ldgv_uninit(hxgep);
3940 
3941 			return (HXGE_ERROR | HXGE_DDI_FAILED);
3942 		}
3943 
3944 		intrp->intr_added++;
3945 	}
3946 	intrp->msi_intx_cnt = nactual;
3947 
3948 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3949 	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
3950 	    navail, nactual, intrp->msi_intx_cnt, intrp->intr_added));
3951 
3952 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
3953 	(void) hxge_intr_ldgv_init(hxgep);
3954 
3955 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type"));
3956 
3957 	return (status);
3958 }
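
/*
 * For reference, a sketch only (the hxge_example_isr name is
 * hypothetical; the real per-ldv and group handlers are defined
 * elsewhere in the driver): handlers passed to ddi_intr_add_handler()
 * follow the ddi_intr_handler_t signature and return DDI_INTR_CLAIMED
 * or DDI_INTR_UNCLAIMED:
 *
 *	static uint_t
 *	hxge_example_isr(caddr_t arg1, caddr_t arg2)
 *	{
 *		... arg1 is the ldv (or ldg) registered above and
 *		... arg2 is the hxgep; service the logical device(s)
 *		return (DDI_INTR_CLAIMED);
 *	}
 *
 * Note that the loop above leaves inthandler unset when a group has
 * nldvs == 0; hxge_ldgv_init() is expected to produce only groups
 * with at least one logical device.
 */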
3959 
3960 /*ARGSUSED*/
3961 static hxge_status_t
3962 hxge_add_intrs_adv_type_fix(p_hxge_t hxgep, uint32_t int_type)
3963 {
3964 	dev_info_t	*dip = hxgep->dip;
3965 	p_hxge_ldg_t	ldgp;
3966 	p_hxge_intr_t	intrp;
3967 	uint_t		*inthandler;
3968 	void		*arg1, *arg2;
3969 	int		behavior;
3970 	int		nintrs, navail;
3971 	int		nactual, nrequired;
3972 	int		inum = 0;
3973 	int		x, y;
3974 	int		ddi_status = DDI_SUCCESS;
3975 	hxge_status_t	status = HXGE_OK;
3976 
3977 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type_fix"));
3978 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3979 
3980 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3981 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3982 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3983 		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
3984 		    "nintrs: %d", ddi_status, nintrs));
3985 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3986 	}
3987 
3988 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
3989 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
3990 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3991 		    "ddi_intr_get_navail() failed, status: 0x%x, "
3992 		    "navail: %d", ddi_status, navail));
3993 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3994 	}
3995 
3996 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3997 	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
3998 	    nintrs, navail));
3999 
4000 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
4001 	    DDI_INTR_ALLOC_NORMAL);
4002 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
4003 	intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP);
4004 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
4005 	    navail, &nactual, behavior);
4006 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
4007 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4008 		    " ddi_intr_alloc() failed: %d", ddi_status));
4009 		kmem_free(intrp->htable, intrp->intr_size);
4010 		return (HXGE_ERROR | HXGE_DDI_FAILED);
4011 	}
4012 
4013 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
4014 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
4015 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4016 		    " ddi_intr_get_pri() failed: %d", ddi_status));
4017 		/* Free already allocated interrupts */
4018 		for (y = 0; y < nactual; y++) {
4019 			(void) ddi_intr_free(intrp->htable[y]);
4020 		}
4021 
4022 		kmem_free(intrp->htable, intrp->intr_size);
4023 		return (HXGE_ERROR | HXGE_DDI_FAILED);
4024 	}
4025 
4026 	nrequired = 0;
4027 	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
4028 	if (status != HXGE_OK) {
4029 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4030 		    "hxge_add_intrs_adv_type_fix:hxge_ldgv_init "
4031 		    "failed: 0x%x", status));
4032 		/* Free already allocated interrupts */
4033 		for (y = 0; y < nactual; y++) {
4034 			(void) ddi_intr_free(intrp->htable[y]);
4035 		}
4036 
4037 		kmem_free(intrp->htable, intrp->intr_size);
4038 		return (status);
4039 	}
4040 
4041 	ldgp = hxgep->ldgvp->ldgp;
4042 	for (x = 0; x < nrequired; x++, ldgp++) {
4043 		ldgp->vector = (uint8_t)x;
4044 		arg1 = ldgp->ldvp;
4045 		arg2 = hxgep;
4046 		if (ldgp->nldvs == 1) {
4047 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
4048 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
4049 			    "hxge_add_intrs_adv_type_fix: "
4050 			    "1-1 int handler(%d) ldg %d ldv %d "
4051 			    "arg1 $%p arg2 $%p\n",
4052 			    x, ldgp->ldg, ldgp->ldvp->ldv, arg1, arg2));
4053 		} else if (ldgp->nldvs > 1) {
4054 			inthandler = (uint_t *)ldgp->sys_intr_handler;
4055 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
4056 			    "hxge_add_intrs_adv_type_fix: "
4057 			    "shared int handler(%d) nldvs %d ldg %d ldv %d "
4058 			    "arg1 0x%016llx arg2 0x%016llx\n",
4059 			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
4060 			    arg1, arg2));
4061 		}
4062 
4063 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
4064 		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
4065 		    DDI_SUCCESS) {
4066 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4067 			    "==> hxge_add_intrs_adv_type_fix: failed #%d "
4068 			    "status 0x%x", x, ddi_status));
4069 			for (y = 0; y < intrp->intr_added; y++) {
4070 				(void) ddi_intr_remove_handler(
4071 				    intrp->htable[y]);
4072 			}
4073 			/* Free already allocated intr */
4074 			for (y = 0; y < nactual; y++) {
4075 				(void) ddi_intr_free(intrp->htable[y]);
4076 			}
4077 			kmem_free(intrp->htable, intrp->intr_size);
4078 
4079 			(void) hxge_ldgv_uninit(hxgep);
4080 
4081 			return (HXGE_ERROR | HXGE_DDI_FAILED);
4082 		}
4083 		intrp->intr_added++;
4084 	}
4085 
4086 	intrp->msi_intx_cnt = nactual;
4087 
4088 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
4089 
4090 	status = hxge_intr_ldgv_init(hxgep);
4091 
4092 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type_fix"));
4093 
4094 	return (status);
4095 }
4096 
4097 /*ARGSUSED*/
4098 static void
4099 hxge_remove_intrs(p_hxge_t hxgep)
4100 {
4101 	int		i, inum;
4102 	p_hxge_intr_t	intrp;
4103 
4104 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs"));
4105 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4106 	if (!intrp->intr_registered) {
4107 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
4108 		    "<== hxge_remove_intrs: interrupts not registered"));
4109 		return;
4110 	}
4111 
4112 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs:advanced"));
4113 
4114 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4115 		(void) ddi_intr_block_disable(intrp->htable,
4116 		    intrp->intr_added);
4117 	} else {
4118 		for (i = 0; i < intrp->intr_added; i++) {
4119 			(void) ddi_intr_disable(intrp->htable[i]);
4120 		}
4121 	}
4122 
4123 	for (inum = 0; inum < intrp->intr_added; inum++) {
4124 		if (intrp->htable[inum]) {
4125 			(void) ddi_intr_remove_handler(intrp->htable[inum]);
4126 		}
4127 	}
4128 
4129 	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
4130 		if (intrp->htable[inum]) {
4131 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
4132 			    "hxge_remove_intrs: ddi_intr_free inum %d "
4133 			    "msi_intx_cnt %d intr_added %d",
4134 			    inum, intrp->msi_intx_cnt, intrp->intr_added));
4135 
4136 			(void) ddi_intr_free(intrp->htable[inum]);
4137 		}
4138 	}
4139 
4140 	kmem_free(intrp->htable, intrp->intr_size);
4141 	intrp->intr_registered = B_FALSE;
4142 	intrp->intr_enabled = B_FALSE;
4143 	intrp->msi_intx_cnt = 0;
4144 	intrp->intr_added = 0;
4145 
4146 	(void) hxge_ldgv_uninit(hxgep);
4147 
4148 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_intrs"));
4149 }
4150 
4151 /*ARGSUSED*/
4152 static void
4153 hxge_remove_soft_intrs(p_hxge_t hxgep)
4154 {
4155 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_soft_intrs"));
4156 
4157 	if (hxgep->resched_id) {
4158 		ddi_remove_softintr(hxgep->resched_id);
4159 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
4160 		    "==> hxge_remove_soft_intrs: removed"));
4161 		hxgep->resched_id = NULL;
4162 	}
4163 
4164 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_soft_intrs"));
4165 }
4166 
4167 /*ARGSUSED*/
4168 void
4169 hxge_intrs_enable(p_hxge_t hxgep)
4170 {
4171 	p_hxge_intr_t	intrp;
4172 	int		i;
4173 	int		status;
4174 
4175 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable"));
4176 
4177 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4178 
4179 	if (!intrp->intr_registered) {
4180 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_intrs_enable: "
4181 		    "interrupts are not registered"));
4182 		return;
4183 	}
4184 
4185 	if (intrp->intr_enabled) {
4186 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
4187 		    "<== hxge_intrs_enable: already enabled"));
4188 		return;
4189 	}
4190 
4191 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4192 		status = ddi_intr_block_enable(intrp->htable,
4193 		    intrp->intr_added);
4194 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
4195 		    "block enable - status 0x%x total inums #%d\n",
4196 		    status, intrp->intr_added));
 		/* Mirror the per-vector path: mark interrupts enabled */
 		if (status == DDI_SUCCESS)
 			intrp->intr_enabled = B_TRUE;
4197 	} else {
4198 		for (i = 0; i < intrp->intr_added; i++) {
4199 			status = ddi_intr_enable(intrp->htable[i]);
4200 			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
4201 			    "ddi_intr_enable:enable - status 0x%x "
4202 			    "total inums %d enable inum #%d\n",
4203 			    status, intrp->intr_added, i));
4204 			if (status == DDI_SUCCESS) {
4205 				intrp->intr_enabled = B_TRUE;
4206 			}
4207 		}
4208 	}
4209 
4210 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_enable"));
4211 }
4212 
4213 /*ARGSUSED*/
4214 static void
4215 hxge_intrs_disable(p_hxge_t hxgep)
4216 {
4217 	p_hxge_intr_t	intrp;
4218 	int		i;
4219 
4220 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_disable"));
4221 
4222 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4223 
4224 	if (!intrp->intr_registered) {
4225 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable: "
4226 		    "interrupts are not registered"));
4227 		return;
4228 	}
4229 
4230 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4231 		(void) ddi_intr_block_disable(intrp->htable,
4232 		    intrp->intr_added);
4233 	} else {
4234 		for (i = 0; i < intrp->intr_added; i++) {
4235 			(void) ddi_intr_disable(intrp->htable[i]);
4236 		}
4237 	}
4238 
4239 	intrp->intr_enabled = B_FALSE;
4240 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable"));
4241 }
4242 
4243 static hxge_status_t
4244 hxge_mac_register(p_hxge_t hxgep)
4245 {
4246 	mac_register_t	*macp;
4247 	int		status;
4248 
4249 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_mac_register"));
4250 
4251 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
4252 		return (HXGE_ERROR);
4253 
4254 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
4255 	macp->m_driver = hxgep;
4256 	macp->m_dip = hxgep->dip;
4257 	macp->m_src_addr = hxgep->ouraddr.ether_addr_octet;
4258 
4259 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
4260 	    "hxge_mac_register: ether addr is %x:%x:%x:%x:%x:%x",
4261 	    macp->m_src_addr[0],
4262 	    macp->m_src_addr[1],
4263 	    macp->m_src_addr[2],
4264 	    macp->m_src_addr[3],
4265 	    macp->m_src_addr[4],
4266 	    macp->m_src_addr[5]));
4267 
4268 	macp->m_callbacks = &hxge_m_callbacks;
4269 	macp->m_min_sdu = 0;
4270 	macp->m_max_sdu = hxgep->vmac.maxframesize - MTU_TO_FRAME_SIZE;
4271 	macp->m_margin = VLAN_TAGSZ;
4272 	macp->m_priv_props = hxge_priv_props;
4273 	macp->m_priv_prop_count = HXGE_MAX_PRIV_PROPS;
4274 
4275 	status = mac_register(macp, &hxgep->mach);
4276 	mac_free(macp);
4277 
4278 	if (status != 0) {
4279 		cmn_err(CE_WARN,
4280 		    "hxge_mac_register failed (status %d instance %d)",
4281 		    status, hxgep->instance);
4282 		return (HXGE_ERROR);
4283 	}
4284 
4285 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_mac_register success "
4286 	    "(instance %d)", hxgep->instance));
4287 
4288 	return (HXGE_OK);
4289 }
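
/*
 * Teardown note (a sketch, assuming the usual GLDv3 life cycle): the
 * mac_register_t is only a transfer object and is freed right after
 * mac_register() above, whether or not registration succeeded.  The
 * handle saved in hxgep->mach must later be released at detach time,
 * for example:
 *
 *	if (hxgep->mach != NULL)
 *		(void) mac_unregister(hxgep->mach);
 */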
4290 
4291 static int
4292 hxge_init_common_dev(p_hxge_t hxgep)
4293 {
4294 	p_hxge_hw_list_t	hw_p;
4295 	dev_info_t		*p_dip;
4296 
4297 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_init_common_dev"));
4298 
4299 	p_dip = hxgep->p_dip;
4300 	MUTEX_ENTER(&hxge_common_lock);
4301 
4302 	/*
4303 	 * Loop through existing per Hydra hardware list.
4304 	 */
4305 	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
4306 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4307 		    "==> hxge_init_common_dev: hw_p $%p parent dip $%p",
4308 		    hw_p, p_dip));
4309 		if (hw_p->parent_devp == p_dip) {
4310 			hxgep->hxge_hw_p = hw_p;
4311 			hw_p->ndevs++;
4312 			hw_p->hxge_p = hxgep;
4313 			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4314 			    "==> hxge_init_common_device: "
4315 			    "hw_p $%p parent dip $%p ndevs %d (found)",
4316 			    hw_p, p_dip, hw_p->ndevs));
4317 			break;
4318 		}
4319 	}
4320 
4321 	if (hw_p == NULL) {
4322 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4323 		    "==> hxge_init_common_dev: parent dip $%p (new)", p_dip));
4324 		hw_p = kmem_zalloc(sizeof (hxge_hw_list_t), KM_SLEEP);
4325 		hw_p->parent_devp = p_dip;
4326 		hw_p->magic = HXGE_MAGIC;
4327 		hxgep->hxge_hw_p = hw_p;
4328 		hw_p->ndevs++;
4329 		hw_p->hxge_p = hxgep;
4330 		hw_p->next = hxge_hw_list;
4331 
4332 		MUTEX_INIT(&hw_p->hxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
4333 		MUTEX_INIT(&hw_p->hxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
4334 		MUTEX_INIT(&hw_p->hxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
4335 
4336 		hxge_hw_list = hw_p;
4337 	}
4338 	MUTEX_EXIT(&hxge_common_lock);
4339 	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4340 	    "==> hxge_init_common_dev (hxge_hw_list) $%p", hxge_hw_list));
4341 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_init_common_dev"));
4342 
4343 	return (HXGE_OK);
4344 }
4345 
4346 static void
4347 hxge_uninit_common_dev(p_hxge_t hxgep)
4348 {
4349 	p_hxge_hw_list_t	hw_p, h_hw_p;
4350 	dev_info_t		*p_dip;
4351 
4352 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_uninit_common_dev"));
4353 	if (hxgep->hxge_hw_p == NULL) {
4354 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4355 		    "<== hxge_uninit_common_dev (no common)"));
4356 		return;
4357 	}
4358 
4359 	MUTEX_ENTER(&hxge_common_lock);
4360 	h_hw_p = hxge_hw_list;
4361 	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
4362 		p_dip = hw_p->parent_devp;
4363 		if (hxgep->hxge_hw_p == hw_p && p_dip == hxgep->p_dip &&
4364 		    hxgep->hxge_hw_p->magic == HXGE_MAGIC &&
4365 		    hw_p->magic == HXGE_MAGIC) {
4366 			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4367 			    "==> hxge_uninit_common_dev: "
4368 			    "hw_p $%p parent dip $%p ndevs %d (found)",
4369 			    hw_p, p_dip, hw_p->ndevs));
4370 
4371 			hxgep->hxge_hw_p = NULL;
4372 			if (hw_p->ndevs) {
4373 				hw_p->ndevs--;
4374 			}
4375 			hw_p->hxge_p = NULL;
4376 			if (!hw_p->ndevs) {
4377 				MUTEX_DESTROY(&hw_p->hxge_vlan_lock);
4378 				MUTEX_DESTROY(&hw_p->hxge_tcam_lock);
4379 				MUTEX_DESTROY(&hw_p->hxge_cfg_lock);
4380 				HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4381 				    "==> hxge_uninit_common_dev: "
4382 				    "hw_p $%p parent dip $%p ndevs %d (last)",
4383 				    hw_p, p_dip, hw_p->ndevs));
4384 
4385 				if (hw_p == hxge_hw_list) {
4386 					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4387 					    "==> hxge_uninit_common_dev:"
4388 					    "remove head "
4389 					    "hw_p $%p parent dip $%p "
4390 					    "ndevs %d (head)",
4391 					    hw_p, p_dip, hw_p->ndevs));
4392 					hxge_hw_list = hw_p->next;
4393 				} else {
4394 					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4395 					    "==> hxge_uninit_common_dev:"
4396 					    "remove middle "
4397 					    "hw_p $%p parent dip $%p "
4398 					    "ndevs %d (middle)",
4399 					    hw_p, p_dip, hw_p->ndevs));
4400 					h_hw_p->next = hw_p->next;
4401 				}
4402 
4403 				KMEM_FREE(hw_p, sizeof (hxge_hw_list_t));
4404 			}
4405 			break;
4406 		} else {
4407 			h_hw_p = hw_p;
4408 		}
4409 	}
4410 
4411 	MUTEX_EXIT(&hxge_common_lock);
4412 	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4413 	    "==> hxge_uninit_common_dev (hxge_hw_list) $%p", hxge_hw_list));
4414 
4415 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_uninit_common_dev"));
4416 }
4417 
4418 static void
4419 hxge_link_poll(void *arg)
4420 {
4421 	p_hxge_t		hxgep = (p_hxge_t)arg;
4422 	hpi_handle_t		handle;
4423 	cip_link_stat_t		link_stat;
4424 	hxge_timeout		*to = &hxgep->timeout;
4425 
4426 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
4427 	HXGE_REG_RD32(handle, CIP_LINK_STAT, &link_stat.value);
4428 
4429 	if (to->report_link_status ||
4430 	    (to->link_status != link_stat.bits.xpcs0_link_up)) {
4431 		to->link_status = link_stat.bits.xpcs0_link_up;
4432 		to->report_link_status = B_FALSE;
4433 
4434 		if (link_stat.bits.xpcs0_link_up) {
4435 			hxge_link_update(hxgep, LINK_STATE_UP);
4436 		} else {
4437 			hxge_link_update(hxgep, LINK_STATE_DOWN);
4438 		}
4439 	}
4440 
4441 	/* Restart the link status timer to check the link status */
4442 	MUTEX_ENTER(&to->lock);
4443 	to->id = timeout(hxge_link_poll, arg, to->ticks);
4444 	MUTEX_EXIT(&to->lock);
4445 }
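
/*
 * Note (a sketch, not part of the original source): hxge_link_poll()
 * re-arms itself with timeout(9F) above, so the driver must cancel the
 * timer with untimeout(9F) before detach, taking care not to hold
 * to->lock across the cancellation:
 *
 *	timeout_id_t tid;
 *
 *	MUTEX_ENTER(&to->lock);
 *	tid = to->id;
 *	to->id = 0;
 *	MUTEX_EXIT(&to->lock);
 *	if (tid != 0)
 *		(void) untimeout(tid);
 */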
4446 
4447 static void
4448 hxge_link_update(p_hxge_t hxgep, link_state_t state)
4449 {
4450 	p_hxge_stats_t		statsp = (p_hxge_stats_t)hxgep->statsp;
4451 
4452 	mac_link_update(hxgep->mach, state);
4453 	if (state == LINK_STATE_UP) {
4454 		statsp->mac_stats.link_speed = 10000;	/* Mbps */
4455 		statsp->mac_stats.link_duplex = 2;	/* full duplex */
4456 		statsp->mac_stats.link_up = 1;
4457 	} else {
4458 		statsp->mac_stats.link_speed = 0;
4459 		statsp->mac_stats.link_duplex = 0;
4460 		statsp->mac_stats.link_up = 0;
4461 	}
4462 }
4463