/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SunOS MT STREAMS Hydra 10Gb Ethernet Device Driver.
 */
#include <hxge_impl.h>
#include <hxge_pfc.h>

/*
 * PSARC/2007/453 MSI-X interrupt limit override
 * (This PSARC case is limited to MSI-X vectors
 *  and SPARC platforms only).
 */
#if defined(_BIG_ENDIAN)
uint32_t hxge_msi_enable = 2;
#else
uint32_t hxge_msi_enable = 1;
#endif
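
/*
 * Note: per the PSARC case above, hxge_msi_enable defaults to 2 on
 * big-endian (SPARC) platforms and to 1 elsewhere.  How each value
 * maps to an interrupt registration strategy is presumed to be
 * decided when interrupts are added (see hxge_add_intrs_adv() below).
 */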

/*
 * Globals: tunable parameters (/etc/system or adb)
 */
uint32_t hxge_rbr_size = HXGE_RBR_RBB_DEFAULT;
uint32_t hxge_rbr_spare_size = 0;
uint32_t hxge_rcr_size = HXGE_RCR_DEFAULT;
uint32_t hxge_tx_ring_size = HXGE_TX_RING_DEFAULT;
uint32_t hxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t hxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t hxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t hxge_jumbo_frame_size = MAX_FRAME_SIZE;
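
/*
 * Illustrative only: each of the tunables above can be overridden
 * from /etc/system with the standard module:variable syntax, e.g.
 *
 *	set hxge:hxge_rbr_size = 2048
 *
 * The new value takes effect the next time the driver is loaded.
 */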

static hxge_os_mutex_t hxgedebuglock;
static int hxge_debug_init = 0;

/*
 * Debugging flags:
 *		hxge_no_tx_lb : transmit load balancing
 *		hxge_tx_lb_policy: 0 - TCP/UDP port (default)
 *				   1 - From the Stack
 *				   2 - Destination IP Address
 */
uint32_t hxge_no_tx_lb = 0;
uint32_t hxge_tx_lb_policy = HXGE_TX_LB_TCPUDP;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx processing.
 */
#if defined(__sparc)
uint32_t hxge_max_rx_pkts = 512;
#else
uint32_t hxge_max_rx_pkts = 1024;
#endif

/*
 * Tunables to manage the receive buffer blocks.
 *
 * hxge_rx_threshold_hi: copy all buffers.
 * hxge_rx_buf_size_type: receive buffer block size type.
 * hxge_rx_threshold_lo: copy only up to tunable block size type.
 */
hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_7;
hxge_rxbuf_type_t hxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_3;
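
/*
 * Summarizing the thresholds above (a reading of the comments, not
 * verified against the RDC code): above hxge_rx_threshold_hi every
 * received packet is copied; between the two thresholds only packets
 * that fit the hxge_rx_buf_size_type block size are copied; below
 * hxge_rx_threshold_lo buffers may instead be loaned up the stack.
 */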

rtrace_t hpi_rtracebuf;

/*
 * Function Prototypes
 */
static int hxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int hxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void hxge_unattach(p_hxge_t);

static hxge_status_t hxge_setup_system_dma_pages(p_hxge_t);

static hxge_status_t hxge_setup_mutexes(p_hxge_t);
static void hxge_destroy_mutexes(p_hxge_t);

static hxge_status_t hxge_map_regs(p_hxge_t hxgep);
static void hxge_unmap_regs(p_hxge_t hxgep);

hxge_status_t hxge_add_intrs(p_hxge_t hxgep);
static hxge_status_t hxge_add_soft_intrs(p_hxge_t hxgep);
static void hxge_remove_intrs(p_hxge_t hxgep);
static void hxge_remove_soft_intrs(p_hxge_t hxgep);
static hxge_status_t hxge_add_intrs_adv(p_hxge_t hxgep);
static hxge_status_t hxge_add_intrs_adv_type(p_hxge_t, uint32_t);
static hxge_status_t hxge_add_intrs_adv_type_fix(p_hxge_t, uint32_t);
void hxge_intrs_enable(p_hxge_t hxgep);
static void hxge_intrs_disable(p_hxge_t hxgep);
static void hxge_suspend(p_hxge_t);
static hxge_status_t hxge_resume(p_hxge_t);
hxge_status_t hxge_setup_dev(p_hxge_t);
static void hxge_destroy_dev(p_hxge_t);
hxge_status_t hxge_alloc_mem_pool(p_hxge_t);
static void hxge_free_mem_pool(p_hxge_t);
static hxge_status_t hxge_alloc_rx_mem_pool(p_hxge_t);
static void hxge_free_rx_mem_pool(p_hxge_t);
static hxge_status_t hxge_alloc_tx_mem_pool(p_hxge_t);
static void hxge_free_tx_mem_pool(p_hxge_t);
static hxge_status_t hxge_dma_mem_alloc(p_hxge_t, dma_method_t,
    struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
    p_hxge_dma_common_t);
static void hxge_dma_mem_free(p_hxge_dma_common_t);
static hxge_status_t hxge_alloc_rx_buf_dma(p_hxge_t, uint16_t,
    p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
static void hxge_free_rx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
static hxge_status_t hxge_alloc_rx_cntl_dma(p_hxge_t, uint16_t,
    p_hxge_dma_common_t *, struct ddi_dma_attr *, size_t);
static void hxge_free_rx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
static hxge_status_t hxge_alloc_tx_buf_dma(p_hxge_t, uint16_t,
    p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
static void hxge_free_tx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
static hxge_status_t hxge_alloc_tx_cntl_dma(p_hxge_t, uint16_t,
    p_hxge_dma_common_t *, size_t);
static void hxge_free_tx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
static int hxge_init_common_dev(p_hxge_t);
static void hxge_uninit_common_dev(p_hxge_t);

/*
 * The next declarations are for the GLDv3 interface.
 */
static int hxge_m_start(void *);
static void hxge_m_stop(void *);
static int hxge_m_unicst(void *, const uint8_t *);
static int hxge_m_multicst(void *, boolean_t, const uint8_t *);
static int hxge_m_promisc(void *, boolean_t);
static void hxge_m_ioctl(void *, queue_t *, mblk_t *);
static void hxge_m_resources(void *);
static hxge_status_t hxge_mac_register(p_hxge_t hxgep);

static int hxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
static int hxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);
static int hxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
static int hxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);
static boolean_t hxge_m_getcapab(void *, mac_capab_t, void *);
static boolean_t hxge_param_locked(mac_prop_id_t pr_num);
static int hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val);
static int hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *);
static int hxge_get_def_val(hxge_t *hxgep, mac_prop_id_t pr_num,
    uint_t pr_valsize, void *pr_val);
static int hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name,
    uint_t pr_valsize, const void *pr_val);
static int hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name,
    uint_t pr_flags, uint_t pr_valsize, void *pr_val);
static void hxge_link_poll(void *arg);
static void hxge_link_update(p_hxge_t hxge, link_state_t state);

mac_priv_prop_t hxge_priv_props[] = {
	{"_rxdma_intr_time", MAC_PROP_PERM_RW},
	{"_rxdma_intr_pkts", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW}
};

#define	HXGE_MAX_PRIV_PROPS	\
	(sizeof (hxge_priv_props)/sizeof (mac_priv_prop_t))

#define	HXGE_MAGIC	0x4E584745UL
#define	MAX_DUMP_SZ 256

#define	HXGE_M_CALLBACK_FLAGS	\
	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

extern mblk_t *hxge_m_tx(void *arg, mblk_t *mp);
extern hxge_status_t hxge_pfc_set_default_mac_addr(p_hxge_t hxgep);

static mac_callbacks_t hxge_m_callbacks = {
	HXGE_M_CALLBACK_FLAGS,
	hxge_m_stat,
	hxge_m_start,
	hxge_m_stop,
	hxge_m_promisc,
	hxge_m_multicst,
	hxge_m_unicst,
	hxge_m_tx,
	hxge_m_resources,
	hxge_m_ioctl,
	hxge_m_getcapab,
	NULL,
	NULL,
	hxge_m_setprop,
	hxge_m_getprop
};
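
/*
 * The two NULL entries above are believed to be the optional
 * mc_open/mc_close callbacks, which this driver does not implement;
 * HXGE_M_CALLBACK_FLAGS only advertises the optional callbacks that
 * are actually provided.
 */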

/* Enable debug messages as necessary. */
uint64_t hxge_debug_level = 0;

/*
 * This list contains the instance structures for the Hydra
 * devices present in the system. The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void *hxge_list = NULL;
void *hxge_hw_list = NULL;
hxge_os_mutex_t hxge_common_lock;

extern uint64_t hpi_debug_level;

extern hxge_status_t hxge_ldgv_init();
extern hxge_status_t hxge_ldgv_uninit();
extern hxge_status_t hxge_intr_ldgv_init();
extern void hxge_fm_init(p_hxge_t hxgep, ddi_device_acc_attr_t *reg_attr,
    ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr);
extern void hxge_fm_fini(p_hxge_t hxgep);

/*
 * Count used to maintain the number of buffers being used
 * by Hydra instances and loaned up to the upper layers.
 */
uint32_t hxge_mblks_pending = 0;

/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t hxge_dev_reg_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};

/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC
};

ddi_dma_attr_t hxge_rx_rcr_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x80000,		/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t hxge_tx_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x100000,		/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t hxge_rx_rbr_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x40000,		/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t hxge_rx_mbox_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t hxge_tx_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t hxge_rx_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x10000,		/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	DDI_DMA_RELAXED_ORDERING /* attribute flags */
};
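
/*
 * Unlike the descriptor and mailbox attributes above, the receive
 * data buffer attribute sets DDI_DMA_RELAXED_ORDERING, allowing the
 * PCIe fabric to relax write ordering for buffer data in exchange for
 * throughput; ordering-sensitive state presumably keeps the default
 * strict ordering for that reason.
 */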

ddi_dma_lim_t hxge_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};

dma_method_t hxge_force_dma = DVMA;

/*
 * DMA chunk sizes.
 *
 * Try to allocate the largest possible size so that fewer
 * DMA chunks need to be managed.
 */
size_t alloc_sizes[] = {
    0x1000, 0x2000, 0x4000, 0x8000,
    0x10000, 0x20000, 0x40000, 0x80000,
    0x100000, 0x200000, 0x400000, 0x800000, 0x1000000
};
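
/*
 * A sketch of how this table is consumed (see hxge_alloc_rx_buf_dma()
 * below): start at the largest entry that does not exceed the total
 * requested size, then fall back to the next smaller entry on each
 * allocation failure until enough chunks are allocated or the table
 * is exhausted.
 */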

/*
 * hxge_attach - Device attach entry point.  Handles DDI_ATTACH as well
 * as DDI_RESUME and DDI_PM_RESUME of a previously suspended instance.
 */
static int
hxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	p_hxge_t	hxgep = NULL;
	int		instance;
	int		status = DDI_SUCCESS;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_attach"));

	/*
	 * Get the device instance since we'll need to setup or retrieve a soft
	 * state for this instance.
	 */
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_ATTACH"));
		break;

	case DDI_RESUME:
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_RESUME"));
		hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
		if (hxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (hxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		if (hxgep->suspended == DDI_PM_SUSPEND) {
			status = ddi_dev_is_needed(hxgep->dip, 0, 1);
		} else {
			(void) hxge_resume(hxgep);
		}
		goto hxge_attach_exit;

	case DDI_PM_RESUME:
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_RESUME"));
		hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
		if (hxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (hxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		(void) hxge_resume(hxgep);
		goto hxge_attach_exit;

	default:
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing unknown"));
		status = DDI_FAILURE;
		goto hxge_attach_exit;
	}

	if (ddi_soft_state_zalloc(hxge_list, instance) == DDI_FAILURE) {
		status = DDI_FAILURE;
		HXGE_ERROR_MSG((hxgep, DDI_CTL,
		    "ddi_soft_state_zalloc failed"));
		goto hxge_attach_exit;
	}

	hxgep = ddi_get_soft_state(hxge_list, instance);
	if (hxgep == NULL) {
		status = HXGE_ERROR;
		HXGE_ERROR_MSG((hxgep, DDI_CTL,
		    "ddi_get_soft_state failed"));
		goto hxge_attach_fail2;
	}

	hxgep->drv_state = 0;
	hxgep->dip = dip;
	hxgep->instance = instance;
	hxgep->p_dip = ddi_get_parent(dip);
	hxgep->hxge_debug_level = hxge_debug_level;
	hpi_debug_level = hxge_debug_level;

	hxge_fm_init(hxgep, &hxge_dev_reg_acc_attr, &hxge_dev_desc_dma_acc_attr,
	    &hxge_rx_dma_attr);

	status = hxge_map_regs(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_map_regs failed"));
		goto hxge_attach_fail3;
	}

	status = hxge_init_common_dev(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_init_common_dev failed"));
		goto hxge_attach_fail4;
	}

	/*
	 * Setup the Ndd parameters for this instance.
	 */
	hxge_init_param(hxgep);

	/*
	 * Setup Register Tracing Buffer.
	 */
	hpi_rtrace_buf_init((rtrace_t *)&hpi_rtracebuf);

	/* init stats ptr */
	hxge_init_statsp(hxgep);

	status = hxge_setup_mutexes(hxgep);
	if (status != HXGE_OK) {
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set mutex failed"));
		goto hxge_attach_fail;
	}

	status = hxge_get_config_properties(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "get_hw create failed"));
		goto hxge_attach_fail;
	}

	/*
	 * Setup the Kstats for the driver.
	 */
	hxge_setup_kstats(hxgep);
	hxge_setup_param(hxgep);

	status = hxge_setup_system_dma_pages(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "set dma page failed"));
		goto hxge_attach_fail;
	}

	hxge_hw_id_init(hxgep);
	hxge_hw_init_niu_common(hxgep);

	status = hxge_setup_dev(hxgep);
	if (status != DDI_SUCCESS) {
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set dev failed"));
		goto hxge_attach_fail;
	}

	status = hxge_add_intrs(hxgep);
	if (status != DDI_SUCCESS) {
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "add_intr failed"));
		goto hxge_attach_fail;
	}

	status = hxge_add_soft_intrs(hxgep);
	if (status != DDI_SUCCESS) {
		HXGE_DEBUG_MSG((hxgep, HXGE_ERR_CTL, "add_soft_intr failed"));
		goto hxge_attach_fail;
	}

	/*
	 * Enable interrupts.
	 */
	hxge_intrs_enable(hxgep);

	/*
	 * Clear the PEU parity error interrupt mask once
	 * ddi_intr_enable() has been called.
	 */
	HXGE_REG_WR32(hxgep->hpi_handle, PEU_INTR_MASK, 0x0);

	if ((status = hxge_mac_register(hxgep)) != HXGE_OK) {
		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
		    "unable to register to mac layer (%d)", status));
		goto hxge_attach_fail;
	}
	mac_link_update(hxgep->mach, LINK_STATE_UNKNOWN);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "registered to mac (instance %d)",
	    instance));

	goto hxge_attach_exit;

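	/*
	 * The failure labels below unwind the setup steps in reverse
	 * order; hxge_attach_fail itself simply lets hxge_unattach()
	 * tear down whatever was completed.
	 */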
hxge_attach_fail:
	hxge_unattach(hxgep);
	goto hxge_attach_fail1;

hxge_attach_fail5:
	/*
	 * Tear down the ndd parameters setup.
	 */
	hxge_destroy_param(hxgep);

	/*
	 * Tear down the kstat setup.
	 */
	hxge_destroy_kstats(hxgep);

hxge_attach_fail4:
	if (hxgep->hxge_hw_p) {
		hxge_uninit_common_dev(hxgep);
		hxgep->hxge_hw_p = NULL;
	}
hxge_attach_fail3:
	/*
	 * Unmap the register setup.
	 */
	hxge_unmap_regs(hxgep);

	hxge_fm_fini(hxgep);

hxge_attach_fail2:
	ddi_soft_state_free(hxge_list, hxgep->instance);

hxge_attach_fail1:
	if (status != HXGE_OK)
		status = (HXGE_ERROR | HXGE_DDI_FAILED);
	hxgep = NULL;

hxge_attach_exit:
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_attach status = 0x%08x",
	    status));

	return (status);
}

static int
hxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		status = DDI_SUCCESS;
	int		instance;
	p_hxge_t	hxgep = NULL;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_detach"));
	instance = ddi_get_instance(dip);
	hxgep = ddi_get_soft_state(hxge_list, instance);
	if (hxgep == NULL) {
		status = DDI_FAILURE;
		goto hxge_detach_exit;
	}

	switch (cmd) {
	case DDI_DETACH:
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_DETACH"));
		break;

	case DDI_PM_SUSPEND:
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
		hxgep->suspended = DDI_PM_SUSPEND;
		hxge_suspend(hxgep);
		break;

	case DDI_SUSPEND:
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_SUSPEND"));
		if (hxgep->suspended != DDI_PM_SUSPEND) {
			hxgep->suspended = DDI_SUSPEND;
			hxge_suspend(hxgep);
		}
		break;

	default:
		status = DDI_FAILURE;
		break;
	}

	if (cmd != DDI_DETACH)
		goto hxge_detach_exit;

	/*
	 * Stop the xcvr polling.
	 */
	hxgep->suspended = cmd;

	if (hxgep->mach && (status = mac_unregister(hxgep->mach)) != 0) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "<== hxge_detach (mac_unregister) status = 0x%08X", status));

	hxge_unattach(hxgep);
	hxgep = NULL;

hxge_detach_exit:
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_detach status = 0x%08X",
	    status));

	return (status);
}

static void
hxge_unattach(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unattach"));

	if (hxgep == NULL || hxgep->dev_regs == NULL) {
		return;
	}

	if (hxgep->hxge_hw_p) {
		hxge_uninit_common_dev(hxgep);
		hxgep->hxge_hw_p = NULL;
	}

	if (hxgep->hxge_timerid) {
		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
		hxgep->hxge_timerid = 0;
	}

	/* Stop any further interrupts. */
	hxge_remove_intrs(hxgep);

	/* Remove soft interrupts */
	hxge_remove_soft_intrs(hxgep);

	/* Stop the device and free resources. */
	hxge_destroy_dev(hxgep);

	/* Tear down the ndd parameters setup. */
	hxge_destroy_param(hxgep);

	/* Tear down the kstat setup. */
	hxge_destroy_kstats(hxgep);

	/*
	 * Remove the list of ndd parameters which were set up during attach.
	 */
	if (hxgep->dip) {
		HXGE_DEBUG_MSG((hxgep, OBP_CTL,
		    " hxge_unattach: remove all properties"));
		(void) ddi_prop_remove_all(hxgep->dip);
	}

	/*
	 * Reset RDC, TDC, PFC, and VMAC blocks from PEU to clear any
	 * previous state before unmapping the registers.
	 */
	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, 0x0000001E);
	HXGE_DELAY(1000);

	/*
	 * Unmap the register setup.
	 */
	hxge_unmap_regs(hxgep);

	hxge_fm_fini(hxgep);

	/* Destroy all mutexes.  */
	hxge_destroy_mutexes(hxgep);

	/*
	 * Free the soft state data structures allocated with this instance.
	 */
	ddi_soft_state_free(hxge_list, hxgep->instance);

	HXGE_DEBUG_MSG((NULL, DDI_CTL, "<== hxge_unattach"));
}

static hxge_status_t
hxge_map_regs(p_hxge_t hxgep)
{
	int		ddi_status = DDI_SUCCESS;
	p_dev_regs_t	dev_regs;

#ifdef	HXGE_DEBUG
	char		*sysname;
#endif

	off_t		regsize;
	hxge_status_t	status = HXGE_OK;
	int		nregs;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_map_regs"));

	if (ddi_dev_nregs(hxgep->dip, &nregs) != DDI_SUCCESS)
		return (HXGE_ERROR);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_regs: nregs: %d", nregs));

	hxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->hxge_regh = NULL;
	dev_regs->hxge_pciregh = NULL;
	dev_regs->hxge_msix_regh = NULL;

	(void) ddi_dev_regsize(hxgep->dip, 0, &regsize);
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_map_regs: pci config size 0x%x", regsize));

	ddi_status = ddi_regs_map_setup(hxgep->dip, 0,
	    (caddr_t *)&(dev_regs->hxge_pciregp), 0, 0,
	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_pciregh);
	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_map_regs, hxge bus config regs failed"));
		goto hxge_map_regs_fail0;
	}

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_map_reg: PCI config addr 0x%0llx handle 0x%0llx",
	    dev_regs->hxge_pciregp,
	    dev_regs->hxge_pciregh));

	(void) ddi_dev_regsize(hxgep->dip, 1, &regsize);
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_map_regs: pio size 0x%x", regsize));

	/* set up the device mapped register */
	ddi_status = ddi_regs_map_setup(hxgep->dip, 1,
	    (caddr_t *)&(dev_regs->hxge_regp), 0, 0,
	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_regh);

	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_map_regs for Hydra global reg failed"));
		goto hxge_map_regs_fail1;
	}

	/* set up the msi/msi-x mapped register */
	(void) ddi_dev_regsize(hxgep->dip, 2, &regsize);
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_map_regs: msix size 0x%x", regsize));

	ddi_status = ddi_regs_map_setup(hxgep->dip, 2,
	    (caddr_t *)&(dev_regs->hxge_msix_regp), 0, 0,
	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_msix_regh);

	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_map_regs for msi reg failed"));
		goto hxge_map_regs_fail2;
	}

	hxgep->dev_regs = dev_regs;

	HPI_PCI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_pciregh);
	HPI_PCI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_pciregp);
	HPI_MSI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_msix_regh);
	HPI_MSI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_msix_regp);

	HPI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
	HPI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);

	HPI_REG_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
	HPI_REG_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_reg: hardware addr 0x%0llx "
	    " handle 0x%0llx", dev_regs->hxge_regp, dev_regs->hxge_regh));

	goto hxge_map_regs_exit;

hxge_map_regs_fail3:
	if (dev_regs->hxge_msix_regh) {
		ddi_regs_map_free(&dev_regs->hxge_msix_regh);
	}

hxge_map_regs_fail2:
	if (dev_regs->hxge_regh) {
		ddi_regs_map_free(&dev_regs->hxge_regh);
	}

hxge_map_regs_fail1:
	if (dev_regs->hxge_pciregh) {
		ddi_regs_map_free(&dev_regs->hxge_pciregh);
	}

hxge_map_regs_fail0:
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "Freeing register set memory"));
	kmem_free(dev_regs, sizeof (dev_regs_t));

hxge_map_regs_exit:
	if (ddi_status != DDI_SUCCESS)
		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_map_regs"));
	return (status);
}

static void
hxge_unmap_regs(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unmap_regs"));
	if (hxgep->dev_regs) {
		if (hxgep->dev_regs->hxge_pciregh) {
			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
			    "==> hxge_unmap_regs: bus"));
			ddi_regs_map_free(&hxgep->dev_regs->hxge_pciregh);
			hxgep->dev_regs->hxge_pciregh = NULL;
		}

		if (hxgep->dev_regs->hxge_regh) {
			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
			    "==> hxge_unmap_regs: device registers"));
			ddi_regs_map_free(&hxgep->dev_regs->hxge_regh);
			hxgep->dev_regs->hxge_regh = NULL;
		}

		if (hxgep->dev_regs->hxge_msix_regh) {
			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
			    "==> hxge_unmap_regs: device interrupts"));
			ddi_regs_map_free(&hxgep->dev_regs->hxge_msix_regh);
			hxgep->dev_regs->hxge_msix_regh = NULL;
		}
		kmem_free(hxgep->dev_regs, sizeof (dev_regs_t));
		hxgep->dev_regs = NULL;
	}
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_unmap_regs"));
}

static hxge_status_t
hxge_setup_mutexes(p_hxge_t hxgep)
{
	int		ddi_status = DDI_SUCCESS;
	hxge_status_t	status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_mutexes"));

	/*
	 * Get the interrupt cookie so the mutexes can be initialized.
	 */
	ddi_status = ddi_get_iblock_cookie(hxgep->dip, 0,
	    &hxgep->interrupt_cookie);

	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_setup_mutexes: failed 0x%x", ddi_status));
		goto hxge_setup_mutexes_exit;
	}

	/*
	 * Initialize the mutexes for this device.
	 */
	MUTEX_INIT(hxgep->genlock, NULL,
	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
	MUTEX_INIT(&hxgep->ouraddr_lock, NULL,
	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
	RW_INIT(&hxgep->filter_lock, NULL,
	    RW_DRIVER, (void *) hxgep->interrupt_cookie);
	MUTEX_INIT(&hxgep->pio_lock, NULL,
	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
	MUTEX_INIT(&hxgep->timeout.lock, NULL,
	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);

hxge_setup_mutexes_exit:
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "<== hxge_setup_mutexes status = %x", status));

	if (ddi_status != DDI_SUCCESS)
		status |= (HXGE_ERROR | HXGE_DDI_FAILED);

	return (status);
}

static void
hxge_destroy_mutexes(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_mutexes"));
	RW_DESTROY(&hxgep->filter_lock);
	MUTEX_DESTROY(&hxgep->ouraddr_lock);
	MUTEX_DESTROY(hxgep->genlock);
	MUTEX_DESTROY(&hxgep->pio_lock);
	MUTEX_DESTROY(&hxgep->timeout.lock);

	if (hxge_debug_init == 1) {
		MUTEX_DESTROY(&hxgedebuglock);
		hxge_debug_init = 0;
	}

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_mutexes"));
}

hxge_status_t
hxge_init(p_hxge_t hxgep)
{
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_init"));

	if (hxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks and
	 * receive/transmit descriptor rings.
	 */
	status = hxge_alloc_mem_pool(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "alloc mem failed\n"));
		goto hxge_init_fail1;
	}

	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = hxge_init_txdma_channels(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init txdma failed\n"));
		goto hxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = hxge_init_rxdma_channels(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init rxdma failed\n"));
		goto hxge_init_fail4;
	}

	/*
	 * Initialize TCAM
	 */
	status = hxge_classify_init(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init classify failed\n"));
		goto hxge_init_fail5;
	}

	/*
	 * Initialize the VMAC block.
	 */
	status = hxge_vmac_init(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init MAC failed\n"));
		goto hxge_init_fail5;
	}

	/* Bringup: this may be unnecessary when PXE and FCODE are available */
	status = hxge_pfc_set_default_mac_addr(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "Default Address Failure\n"));
		goto hxge_init_fail5;
	}

	hxge_intrs_enable(hxgep);

	/*
	 * Enable hardware interrupts.
	 */
	hxge_intr_hw_enable(hxgep);
	hxgep->drv_state |= STATE_HW_INITIALIZED;

	goto hxge_init_exit;

hxge_init_fail5:
	hxge_uninit_rxdma_channels(hxgep);
hxge_init_fail4:
	hxge_uninit_txdma_channels(hxgep);
hxge_init_fail3:
	hxge_free_mem_pool(hxgep);
hxge_init_fail1:
	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
	    "<== hxge_init status (failed) = 0x%08x", status));
	return (status);

hxge_init_exit:

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_init status = 0x%08x",
	    status));

	return (status);
}

timeout_id_t
hxge_start_timer(p_hxge_t hxgep, fptrv_t func, int msec)
{
	if ((hxgep->suspended == 0) || (hxgep->suspended == DDI_RESUME)) {
		return (timeout(func, (caddr_t)hxgep,
		    drv_usectohz(1000 * msec)));
	}
	return (NULL);
}

/*ARGSUSED*/
void
hxge_stop_timer(p_hxge_t hxgep, timeout_id_t timerid)
{
	if (timerid) {
		(void) untimeout(timerid);
	}
}

void
hxge_uninit(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_uninit"));

	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
		    "==> hxge_uninit: not initialized"));
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
		return;
	}

	/* Stop timer */
	if (hxgep->hxge_timerid) {
		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
		hxgep->hxge_timerid = 0;
	}

	(void) hxge_intr_hw_disable(hxgep);

	/* Reset the receive VMAC side.  */
	(void) hxge_rx_vmac_disable(hxgep);

	/* Free classification resources */
	(void) hxge_classify_uninit(hxgep);

	/* Reset the transmit/receive DMA side.  */
	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);

	hxge_uninit_txdma_channels(hxgep);
	hxge_uninit_rxdma_channels(hxgep);

	/* Reset the transmit VMAC side.  */
	(void) hxge_tx_vmac_disable(hxgep);

	hxge_free_mem_pool(hxgep);

	hxgep->drv_state &= ~STATE_HW_INITIALIZED;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
}

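/*
 * hxge_get64/hxge_put64 - register peek/poke helpers (presumably
 * backing debug ioctls): the register offset (and, for put64, the
 * value to write) arrives in the message block, and get64 overwrites
 * the message block with the data read back.
 */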
void
hxge_get64(p_hxge_t hxgep, p_mblk_t mp)
{
#if defined(__i386)
	size_t		reg;
#else
	uint64_t	reg;
#endif
	uint64_t	regdata;
	int		i, retry;

	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
	regdata = 0;
	retry = 1;

	for (i = 0; i < retry; i++) {
		HXGE_REG_RD64(hxgep->hpi_handle, reg, &regdata);
	}
	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
}

void
hxge_put64(p_hxge_t hxgep, p_mblk_t mp)
{
#if defined(__i386)
	size_t		reg;
#else
	uint64_t	reg;
#endif
	uint64_t	buf[2];

	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
#if defined(__i386)
	reg = (size_t)buf[0];
#else
	reg = buf[0];
#endif

	HXGE_HPI_PIO_WRITE64(hxgep->hpi_handle, reg, buf[1]);
}

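/*
 * hxge_debug_msg - emit a driver message when the given level is
 * enabled in the per-instance (or global) debug mask; HXGE_NOTE and
 * HXGE_ERR_CTL messages are always emitted, at CE_NOTE and CE_WARN
 * severity respectively.
 */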
/*ARGSUSED*/
/*VARARGS*/
void
hxge_debug_msg(p_hxge_t hxgep, uint64_t level, char *fmt, ...)
{
	char		msg_buffer[1048];
	char		prefix_buffer[32];
	int		instance;
	uint64_t	debug_level;
	int		cmn_level = CE_CONT;
	va_list		ap;

	debug_level = (hxgep == NULL) ? hxge_debug_level :
	    hxgep->hxge_debug_level;

	if ((level & debug_level) || (level == HXGE_NOTE) ||
	    (level == HXGE_ERR_CTL)) {
		/* do the msg processing */
		if (hxge_debug_init == 0) {
			MUTEX_INIT(&hxgedebuglock, NULL, MUTEX_DRIVER, NULL);
			hxge_debug_init = 1;
		}

		MUTEX_ENTER(&hxgedebuglock);

		if ((level & HXGE_NOTE)) {
			cmn_level = CE_NOTE;
		}

		if (level & HXGE_ERR_CTL) {
			cmn_level = CE_WARN;
		}

		va_start(ap, fmt);
		(void) vsprintf(msg_buffer, fmt, ap);
		va_end(ap);

		if (hxgep == NULL) {
			instance = -1;
			(void) sprintf(prefix_buffer, "%s :", "hxge");
		} else {
			instance = hxgep->instance;
			(void) sprintf(prefix_buffer,
			    "%s%d :", "hxge", instance);
		}

		MUTEX_EXIT(&hxgedebuglock);
		cmn_err(cmn_level, "%s %s\n", prefix_buffer, msg_buffer);
	}
}

char *
hxge_dump_packet(char *addr, int size)
{
	uchar_t		*ap = (uchar_t *)addr;
	int		i;
	static char	etherbuf[1024];
	char		*cp = etherbuf;
	char		digits[] = "0123456789abcdef";

	if (!size)
		size = 60;

	if (size > MAX_DUMP_SZ) {
		/* Dump the leading bytes */
		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
		for (i = 0; i < 20; i++)
			*cp++ = '.';
		/* Dump the last MAX_DUMP_SZ/2 bytes */
		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	} else {
		for (i = 0; i < size; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	}
	*--cp = 0;
	return (etherbuf);
}

static void
hxge_suspend(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_suspend"));

	hxge_intrs_disable(hxgep);
	hxge_destroy_dev(hxgep);

	/* Stop the link status timer */
	MUTEX_ENTER(&hxgep->timeout.lock);
	if (hxgep->timeout.id)
		(void) untimeout(hxgep->timeout.id);
	MUTEX_EXIT(&hxgep->timeout.lock);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_suspend"));
}

static hxge_status_t
hxge_resume(p_hxge_t hxgep)
{
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_resume"));
	hxgep->suspended = DDI_RESUME;

	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_START);
	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_START);

	(void) hxge_rx_vmac_enable(hxgep);
	(void) hxge_tx_vmac_enable(hxgep);

	hxge_intrs_enable(hxgep);

	hxgep->suspended = 0;

	/* Resume the link status timer */
	MUTEX_ENTER(&hxgep->timeout.lock);
	hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
	    hxgep->timeout.ticks);
	MUTEX_EXIT(&hxgep->timeout.lock);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "<== hxge_resume status = 0x%x", status));

	return (status);
}

hxge_status_t
hxge_setup_dev(p_hxge_t hxgep)
{
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_dev"));

	status = hxge_link_init(hxgep);
	if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "Bad register acc handle"));
		status = HXGE_ERROR;
	}

	if (status != HXGE_OK) {
		HXGE_DEBUG_MSG((hxgep, MAC_CTL,
		    " hxge_setup_dev status (link init 0x%08x)", status));
		goto hxge_setup_dev_exit;
	}

hxge_setup_dev_exit:
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "<== hxge_setup_dev status = 0x%08x", status));

	return (status);
}

static void
hxge_destroy_dev(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_dev"));

	(void) hxge_hw_stop(hxgep);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_dev"));
}

static hxge_status_t
hxge_setup_system_dma_pages(p_hxge_t hxgep)
{
	int			ddi_status = DDI_SUCCESS;
	uint_t			count;
	ddi_dma_cookie_t	cookie;
	uint_t			iommu_pagesize;
	hxge_status_t		status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_system_dma_pages"));

	hxgep->sys_page_sz = ddi_ptob(hxgep->dip, (ulong_t)1);
	iommu_pagesize = dvma_pagesize(hxgep->dip);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    " hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
	    " default_block_size %d iommu_pagesize %d",
	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
	    hxgep->rx_default_block_size, iommu_pagesize));

	if (iommu_pagesize != 0) {
		if (hxgep->sys_page_sz == iommu_pagesize) {
			/* Hydra supports up to 8K pages */
			if (iommu_pagesize > 0x2000)
				hxgep->sys_page_sz = 0x2000;
		} else {
			if (hxgep->sys_page_sz > iommu_pagesize)
				hxgep->sys_page_sz = iommu_pagesize;
		}
	}

	hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "==> hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
	    "default_block_size %d page mask %d",
	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
	    hxgep->rx_default_block_size, hxgep->sys_page_mask));

	switch (hxgep->sys_page_sz) {
	default:
		hxgep->sys_page_sz = 0x1000;
		hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
		hxgep->rx_default_block_size = 0x1000;
		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x1000:
		hxgep->rx_default_block_size = 0x1000;
		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x2000:
		hxgep->rx_default_block_size = 0x2000;
		hxgep->rx_bksize_code = RBR_BKSIZE_8K;
		break;
	}

	hxge_rx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
	hxge_tx_dma_attr.dma_attr_align = hxgep->sys_page_sz;

	/*
	 * Get the system DMA burst size.
	 */
	ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr,
	    DDI_DMA_DONTWAIT, 0, &hxgep->dmasparehandle);
	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_dma_alloc_handle: failed status 0x%x", ddi_status));
		goto hxge_get_soft_properties_exit;
	}

	ddi_status = ddi_dma_addr_bind_handle(hxgep->dmasparehandle, NULL,
	    (caddr_t)hxgep->dmasparehandle, sizeof (hxgep->dmasparehandle),
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
	    &cookie, &count);
	if (ddi_status != DDI_DMA_MAPPED) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "Binding spare handle to find system burstsize failed."));
		ddi_status = DDI_FAILURE;
		goto hxge_get_soft_properties_fail1;
	}

	hxgep->sys_burst_sz = ddi_dma_burstsizes(hxgep->dmasparehandle);
	(void) ddi_dma_unbind_handle(hxgep->dmasparehandle);

hxge_get_soft_properties_fail1:
	ddi_dma_free_handle(&hxgep->dmasparehandle);

hxge_get_soft_properties_exit:

	if (ddi_status != DDI_SUCCESS)
		status |= (HXGE_ERROR | HXGE_DDI_FAILED);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "<== hxge_setup_system_dma_pages status = 0x%08x", status));

	return (status);
}

hxge_status_t
hxge_alloc_mem_pool(p_hxge_t hxgep)
{
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_alloc_mem_pool"));

	status = hxge_alloc_rx_mem_pool(hxgep);
	if (status != HXGE_OK) {
		return (HXGE_ERROR);
	}

	status = hxge_alloc_tx_mem_pool(hxgep);
	if (status != HXGE_OK) {
		hxge_free_rx_mem_pool(hxgep);
		return (HXGE_ERROR);
	}

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_alloc_mem_pool"));
	return (HXGE_OK);
}

static void
hxge_free_mem_pool(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_mem_pool"));

	hxge_free_rx_mem_pool(hxgep);
	hxge_free_tx_mem_pool(hxgep);

	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_mem_pool"));
}

static hxge_status_t
hxge_alloc_rx_mem_pool(p_hxge_t hxgep)
{
	int			i, j;
	uint32_t		ndmas, st_rdc;
	p_hxge_dma_pt_cfg_t	p_all_cfgp;
	p_hxge_hw_pt_cfg_t	p_cfgp;
	p_hxge_dma_pool_t	dma_poolp;
	p_hxge_dma_common_t	*dma_buf_p;
	p_hxge_dma_pool_t	dma_rbr_cntl_poolp;
	p_hxge_dma_common_t	*dma_rbr_cntl_p;
	p_hxge_dma_pool_t	dma_rcr_cntl_poolp;
	p_hxge_dma_common_t	*dma_rcr_cntl_p;
	p_hxge_dma_pool_t	dma_mbox_cntl_poolp;
	p_hxge_dma_common_t	*dma_mbox_cntl_p;
	size_t			rx_buf_alloc_size;
	size_t			rx_rbr_cntl_alloc_size;
	size_t			rx_rcr_cntl_alloc_size;
	size_t			rx_mbox_cntl_alloc_size;
	uint32_t		*num_chunks;	/* per dma */
	hxge_status_t		status = HXGE_OK;

	uint32_t		hxge_port_rbr_size;
	uint32_t		hxge_port_rbr_spare_size;
	uint32_t		hxge_port_rcr_size;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_mem_pool"));

	p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
	st_rdc = p_cfgp->start_rdc;
	ndmas = p_cfgp->max_rdcs;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    " hxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));

	/*
	 * Allocate memory for each receive DMA channel.
	 */
	dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
	    KM_SLEEP);
	dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);

	dma_rbr_cntl_poolp = (p_hxge_dma_pool_t)
	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
	dma_rbr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
	dma_rcr_cntl_poolp = (p_hxge_dma_pool_t)
	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
	dma_rcr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
	dma_mbox_cntl_poolp = (p_hxge_dma_pool_t)
	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
	dma_mbox_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);

	num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
	    KM_SLEEP);

	/*
	 * Assume that each DMA channel will be configured with the default
	 * block size.  RBR block counts are rounded up to a multiple of
	 * the post batch count (16).
	 */
	hxge_port_rbr_size = p_all_cfgp->rbr_size;
	hxge_port_rcr_size = p_all_cfgp->rcr_size;

	if (!hxge_port_rbr_size) {
		hxge_port_rbr_size = HXGE_RBR_RBB_DEFAULT;
	}

	if (hxge_port_rbr_size % HXGE_RXDMA_POST_BATCH) {
		hxge_port_rbr_size = (HXGE_RXDMA_POST_BATCH *
		    (hxge_port_rbr_size / HXGE_RXDMA_POST_BATCH + 1));
	}

	p_all_cfgp->rbr_size = hxge_port_rbr_size;
	hxge_port_rbr_spare_size = hxge_rbr_spare_size;

	if (hxge_port_rbr_spare_size % HXGE_RXDMA_POST_BATCH) {
		hxge_port_rbr_spare_size = (HXGE_RXDMA_POST_BATCH *
		    (hxge_port_rbr_spare_size / HXGE_RXDMA_POST_BATCH + 1));
	}

	rx_buf_alloc_size = (hxgep->rx_default_block_size *
	    (hxge_port_rbr_size + hxge_port_rbr_spare_size));

	/*
	 * Addresses of the receive block ring, receive completion ring and
	 * the mailbox must all be cache-aligned (64 bytes).
	 */
	rx_rbr_cntl_alloc_size = hxge_port_rbr_size + hxge_port_rbr_spare_size;
	rx_rbr_cntl_alloc_size *= sizeof (rx_desc_t);
	rx_rcr_cntl_alloc_size = sizeof (rcr_entry_t) * hxge_port_rcr_size;
	rx_mbox_cntl_alloc_size = sizeof (rxdma_mailbox_t);

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_alloc_rx_mem_pool: "
	    "hxge_port_rbr_size = %d hxge_port_rbr_spare_size = %d "
	    "hxge_port_rcr_size = %d rx_rbr_cntl_alloc_size = %d",
	    hxge_port_rbr_size, hxge_port_rbr_spare_size,
	    hxge_port_rcr_size, rx_rbr_cntl_alloc_size));

	hxgep->hxge_port_rbr_size = hxge_port_rbr_size;
	hxgep->hxge_port_rcr_size = hxge_port_rcr_size;

	/*
	 * Allocate memory for receive buffers and descriptor rings. Replace
	 * allocation functions with interface functions provided by the
	 * partition manager when it is available.
	 */
	/*
	 * Allocate memory for the receive buffer blocks.
	 */
	for (i = 0; i < ndmas; i++) {
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    " hxge_alloc_rx_mem_pool to alloc mem: "
		    " dma %d dma_buf_p %llx &dma_buf_p %llx",
		    i, dma_buf_p[i], &dma_buf_p[i]));

		num_chunks[i] = 0;

		status = hxge_alloc_rx_buf_dma(hxgep, st_rdc, &dma_buf_p[i],
		    rx_buf_alloc_size, hxgep->rx_default_block_size,
		    &num_chunks[i]);
		if (status != HXGE_OK) {
			break;
		}

		st_rdc++;
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    " hxge_alloc_rx_mem_pool DONE  alloc mem: "
		    "dma %d dma_buf_p %llx &dma_buf_p %llx", i,
		    dma_buf_p[i], &dma_buf_p[i]));
	}

	if (i < ndmas) {
		goto hxge_alloc_rx_mem_fail1;
	}

	/*
	 * Allocate memory for descriptor rings and mailbox.
	 */
	st_rdc = p_cfgp->start_rdc;
	for (j = 0; j < ndmas; j++) {
		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
		    &dma_rbr_cntl_p[j], &hxge_rx_rbr_desc_dma_attr,
		    rx_rbr_cntl_alloc_size)) != HXGE_OK) {
			break;
		}

		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
		    &dma_rcr_cntl_p[j], &hxge_rx_rcr_desc_dma_attr,
		    rx_rcr_cntl_alloc_size)) != HXGE_OK) {
			break;
		}

		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
		    &dma_mbox_cntl_p[j], &hxge_rx_mbox_dma_attr,
		    rx_mbox_cntl_alloc_size)) != HXGE_OK) {
			break;
		}
		st_rdc++;
	}

	if (j < ndmas) {
		goto hxge_alloc_rx_mem_fail2;
	}

	dma_poolp->ndmas = ndmas;
	dma_poolp->num_chunks = num_chunks;
	dma_poolp->buf_allocated = B_TRUE;
	hxgep->rx_buf_pool_p = dma_poolp;
	dma_poolp->dma_buf_pool_p = dma_buf_p;

	dma_rbr_cntl_poolp->ndmas = ndmas;
	dma_rbr_cntl_poolp->buf_allocated = B_TRUE;
	hxgep->rx_rbr_cntl_pool_p = dma_rbr_cntl_poolp;
	dma_rbr_cntl_poolp->dma_buf_pool_p = dma_rbr_cntl_p;

	dma_rcr_cntl_poolp->ndmas = ndmas;
	dma_rcr_cntl_poolp->buf_allocated = B_TRUE;
	hxgep->rx_rcr_cntl_pool_p = dma_rcr_cntl_poolp;
	dma_rcr_cntl_poolp->dma_buf_pool_p = dma_rcr_cntl_p;

	dma_mbox_cntl_poolp->ndmas = ndmas;
	dma_mbox_cntl_poolp->buf_allocated = B_TRUE;
	hxgep->rx_mbox_cntl_pool_p = dma_mbox_cntl_poolp;
	dma_mbox_cntl_poolp->dma_buf_pool_p = dma_mbox_cntl_p;

	goto hxge_alloc_rx_mem_pool_exit;

hxge_alloc_rx_mem_fail2:
	/* Free control buffers */
	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "==> hxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
	for (; j >= 0; j--) {
		hxge_free_rx_cntl_dma(hxgep,
		    (p_hxge_dma_common_t)dma_rbr_cntl_p[j]);
		hxge_free_rx_cntl_dma(hxgep,
		    (p_hxge_dma_common_t)dma_rcr_cntl_p[j]);
		hxge_free_rx_cntl_dma(hxgep,
		    (p_hxge_dma_common_t)dma_mbox_cntl_p[j]);
		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
		    "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
	}
	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));

hxge_alloc_rx_mem_fail1:
	/* Free data buffers */
	i--;
	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "==> hxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
	for (; i >= 0; i--) {
		hxge_free_rx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
		    num_chunks[i]);
	}
	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "==> hxge_alloc_rx_mem_pool: data bufs freed (%d)", i));

	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));

hxge_alloc_rx_mem_pool_exit:
	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "<== hxge_alloc_rx_mem_pool:status 0x%08x", status));

	return (status);
}

static void
hxge_free_rx_mem_pool(p_hxge_t hxgep)
{
	uint32_t		i, ndmas;
	p_hxge_dma_pool_t	dma_poolp;
	p_hxge_dma_common_t	*dma_buf_p;
	p_hxge_dma_pool_t	dma_rbr_cntl_poolp;
	p_hxge_dma_common_t	*dma_rbr_cntl_p;
	p_hxge_dma_pool_t	dma_rcr_cntl_poolp;
	p_hxge_dma_common_t	*dma_rcr_cntl_p;
	p_hxge_dma_pool_t	dma_mbox_cntl_poolp;
	p_hxge_dma_common_t	*dma_mbox_cntl_p;
	uint32_t		*num_chunks;

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_free_rx_mem_pool"));

	dma_poolp = hxgep->rx_buf_pool_p;
	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool "
		    "(null rx buf pool or buf not allocated)"));
		return;
	}

	dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p;
	if (dma_rbr_cntl_poolp == NULL ||
	    (!dma_rbr_cntl_poolp->buf_allocated)) {
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    "<== hxge_free_rx_mem_pool "
		    "(null rbr cntl buf pool or rbr cntl buf not allocated)"));
		return;
	}

	dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p;
	if (dma_rcr_cntl_poolp == NULL ||
	    (!dma_rcr_cntl_poolp->buf_allocated)) {
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    "<== hxge_free_rx_mem_pool "
		    "(null rcr cntl buf pool or rcr cntl buf not allocated)"));
		return;
	}

	dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p;
	if (dma_mbox_cntl_poolp == NULL ||
	    (!dma_mbox_cntl_poolp->buf_allocated)) {
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    "<== hxge_free_rx_mem_pool "
		    "(null mbox cntl buf pool or mbox cntl buf not allocated)"));
		return;
	}
1753 
1754 	dma_buf_p = dma_poolp->dma_buf_pool_p;
1755 	num_chunks = dma_poolp->num_chunks;
1756 
1757 	dma_rbr_cntl_p = dma_rbr_cntl_poolp->dma_buf_pool_p;
1758 	dma_rcr_cntl_p = dma_rcr_cntl_poolp->dma_buf_pool_p;
1759 	dma_mbox_cntl_p = dma_mbox_cntl_poolp->dma_buf_pool_p;
1760 	ndmas = dma_rbr_cntl_poolp->ndmas;
1761 
1762 	for (i = 0; i < ndmas; i++) {
1763 		hxge_free_rx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
1764 	}
1765 
1766 	for (i = 0; i < ndmas; i++) {
1767 		hxge_free_rx_cntl_dma(hxgep, dma_rbr_cntl_p[i]);
1768 		hxge_free_rx_cntl_dma(hxgep, dma_rcr_cntl_p[i]);
1769 		hxge_free_rx_cntl_dma(hxgep, dma_mbox_cntl_p[i]);
1770 	}
1771 
1772 	for (i = 0; i < ndmas; i++) {
1773 		KMEM_FREE(dma_buf_p[i],
1774 		    sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1775 		KMEM_FREE(dma_rbr_cntl_p[i], sizeof (hxge_dma_common_t));
1776 		KMEM_FREE(dma_rcr_cntl_p[i], sizeof (hxge_dma_common_t));
1777 		KMEM_FREE(dma_mbox_cntl_p[i], sizeof (hxge_dma_common_t));
1778 	}
1779 
1780 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1781 	KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1782 	KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
1783 	KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1784 	KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
1785 	KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1786 	KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
1787 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1788 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1789 
1790 	hxgep->rx_buf_pool_p = NULL;
1791 	hxgep->rx_rbr_cntl_pool_p = NULL;
1792 	hxgep->rx_rcr_cntl_pool_p = NULL;
1793 	hxgep->rx_mbox_cntl_pool_p = NULL;
1794 
1795 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool"));
1796 }
1797 
1798 static hxge_status_t
1799 hxge_alloc_rx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
1800     p_hxge_dma_common_t *dmap,
1801     size_t alloc_size, size_t block_size, uint32_t *num_chunks)
1802 {
1803 	p_hxge_dma_common_t	rx_dmap;
1804 	hxge_status_t		status = HXGE_OK;
1805 	size_t			total_alloc_size;
1806 	size_t			allocated = 0;
1807 	int			i, size_index, array_size;
1808 
1809 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_buf_dma"));
1810 
1811 	rx_dmap = (p_hxge_dma_common_t)
1812 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
1813 
1814 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1815 	    " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
1816 	    dma_channel, alloc_size, block_size, dmap));
1817 
1818 	total_alloc_size = alloc_size;
1819 
1820 	i = 0;
1821 	size_index = 0;
1822 	array_size = sizeof (alloc_sizes) / sizeof (size_t);
1823 	while ((size_index < array_size) &&
1824 	    (alloc_sizes[size_index] < alloc_size))
1825 		size_index++;
1826 	if (size_index >= array_size) {
1827 		size_index = array_size - 1;
1828 	}
1829 
1830 	while ((allocated < total_alloc_size) &&
1831 	    (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
1832 		rx_dmap[i].dma_chunk_index = i;
1833 		rx_dmap[i].block_size = block_size;
1834 		rx_dmap[i].alength = alloc_sizes[size_index];
1835 		rx_dmap[i].orig_alength = rx_dmap[i].alength;
1836 		rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
1837 		rx_dmap[i].dma_channel = dma_channel;
1838 		rx_dmap[i].contig_alloc_type = B_FALSE;
1839 
1840 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1841 		    "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
1842 		    "i %d nblocks %d alength %d",
1843 		    dma_channel, i, &rx_dmap[i], block_size,
1844 		    i, rx_dmap[i].nblocks, rx_dmap[i].alength));
1845 		status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1846 		    &hxge_rx_dma_attr, rx_dmap[i].alength,
1847 		    &hxge_dev_buf_dma_acc_attr,
1848 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1849 		    (p_hxge_dma_common_t)(&rx_dmap[i]));
1850 		if (status != HXGE_OK) {
1851 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1852 			    " hxge_alloc_rx_buf_dma: Alloc Failed: "
1853 			    " for size: %d", alloc_sizes[size_index]));
1854 			size_index--;
1855 		} else {
1856 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1857 			    " alloc_rx_buf_dma allocated rdc %d "
1858 			    "chunk %d size %x dvma %x bufp %llx ",
1859 			    dma_channel, i, rx_dmap[i].alength,
1860 			    rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
1861 			i++;
1862 			allocated += alloc_sizes[size_index];
1863 		}
1864 	}
1865 
1866 	if (allocated < total_alloc_size) {
1867 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1868 		    " hxge_alloc_rx_buf_dma failed due to"
1869 		    " allocated(%d) < required(%d)",
1870 		    allocated, total_alloc_size));
1871 		goto hxge_alloc_rx_mem_fail1;
1872 	}
1873 
1874 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1875 	    " alloc_rx_buf_dma rdc %d allocated %d chunks", dma_channel, i));
1876 
1877 	*num_chunks = i;
1878 	*dmap = rx_dmap;
1879 
1880 	goto hxge_alloc_rx_mem_exit;
1881 
1882 hxge_alloc_rx_mem_fail1:
1883 	KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1884 
1885 hxge_alloc_rx_mem_exit:
1886 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1887 	    "<== hxge_alloc_rx_buf_dma status 0x%08x", status));
1888 
1889 	return (status);
1890 }
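
/*
 * Illustrative sketch (not compiled into the driver): the chunk
 * allocation policy above, in isolation.  Starting from the largest
 * entry in alloc_sizes that does not exceed the request, each failed
 * allocation steps down to the next smaller size, and each success
 * adds one chunk, until the request is covered or HXGE_DMA_BLOCK
 * chunks are used.  try_alloc() is a hypothetical stand-in for
 * hxge_dma_mem_alloc().
 */
#if 0
static int
hxge_chunk_sketch(size_t request, const size_t *sizes, int nsizes)
{
	size_t	allocated = 0;
	int	nchunks = 0, sx = 0;

	while (sx < nsizes && sizes[sx] < request)
		sx++;
	if (sx >= nsizes)
		sx = nsizes - 1;

	while (allocated < request && sx >= 0 && nchunks < HXGE_DMA_BLOCK) {
		if (try_alloc(sizes[sx]) != 0) {
			sx--;		/* fall back to a smaller chunk */
		} else {
			allocated += sizes[sx];
			nchunks++;
		}
	}
	return (allocated >= request ? nchunks : -1);
}
#endif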
1891 
1892 /*ARGSUSED*/
1893 static void
1894 hxge_free_rx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
1895     uint32_t num_chunks)
1896 {
1897 	int i;
1898 
1899 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1900 	    "==> hxge_free_rx_buf_dma: # of chunks %d", num_chunks));
1901 
1902 	for (i = 0; i < num_chunks; i++) {
1903 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1904 		    "==> hxge_free_rx_buf_dma: chunk %d dmap 0x%llx", i, dmap));
1905 		hxge_dma_mem_free(dmap++);
1906 	}
1907 
1908 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_buf_dma"));
1909 }
1910 
1911 /*ARGSUSED*/
1912 static hxge_status_t
1913 hxge_alloc_rx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
1914     p_hxge_dma_common_t *dmap, struct ddi_dma_attr *attr, size_t size)
1915 {
1916 	p_hxge_dma_common_t	rx_dmap;
1917 	hxge_status_t		status = HXGE_OK;
1918 
1919 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_cntl_dma"));
1920 
1921 	rx_dmap = (p_hxge_dma_common_t)
1922 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t), KM_SLEEP);
1923 
1924 	rx_dmap->contig_alloc_type = B_FALSE;
1925 
1926 	status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1927 	    attr, size, &hxge_dev_desc_dma_acc_attr,
1928 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, rx_dmap);
1929 	if (status != HXGE_OK) {
1930 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1931 		    " hxge_alloc_rx_cntl_dma: Alloc Failed: "
1932 		    " for size: %d", size));
1933 		goto hxge_alloc_rx_cntl_dma_fail1;
1934 	}
1935 
1936 	*dmap = rx_dmap;
1937 
1938 	goto hxge_alloc_rx_cntl_dma_exit;
1939 
1940 hxge_alloc_rx_cntl_dma_fail1:
1941 	KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t));
1942 
1943 hxge_alloc_rx_cntl_dma_exit:
1944 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1945 	    "<== hxge_alloc_rx_cntl_dma status 0x%08x", status));
1946 
1947 	return (status);
1948 }
1949 
1950 /*ARGSUSED*/
1951 static void
1952 hxge_free_rx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
1953 {
1954 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_rx_cntl_dma"));
1955 
1956 	hxge_dma_mem_free(dmap);
1957 
1958 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_rx_cntl_dma"));
1959 }
1960 
1961 static hxge_status_t
1962 hxge_alloc_tx_mem_pool(p_hxge_t hxgep)
1963 {
1964 	hxge_status_t		status = HXGE_OK;
1965 	int			i, j;
1966 	uint32_t		ndmas, st_tdc;
1967 	p_hxge_dma_pt_cfg_t	p_all_cfgp;
1968 	p_hxge_hw_pt_cfg_t	p_cfgp;
1969 	p_hxge_dma_pool_t	dma_poolp;
1970 	p_hxge_dma_common_t	*dma_buf_p;
1971 	p_hxge_dma_pool_t	dma_cntl_poolp;
1972 	p_hxge_dma_common_t	*dma_cntl_p;
1973 	size_t			tx_buf_alloc_size;
1974 	size_t			tx_cntl_alloc_size;
1975 	uint32_t		*num_chunks;	/* per dma */
1976 
1977 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool"));
1978 
1979 	p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1980 	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1981 	st_tdc = p_cfgp->start_tdc;
1982 	ndmas = p_cfgp->max_tdcs;
1983 
1984 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool: "
1985 	    "p_cfgp 0x%016llx start_tdc %d ndmas %d hxgep->max_tdcs %d",
1986 	    p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, hxgep->max_tdcs));
1987 	/*
1988 	 * Allocate memory for each transmit DMA channel.
1989 	 */
1990 	dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1991 	    KM_SLEEP);
1992 	dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1993 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1994 
1995 	dma_cntl_poolp = (p_hxge_dma_pool_t)
1996 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1997 	dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1998 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1999 
2000 	hxgep->hxge_port_tx_ring_size = hxge_tx_ring_size;
2001 
2002 	/*
2003 	 * Assume that each DMA channel will be configured with the default
2004 	 * transmit buffer size for copying transmit data. (Packets with a
2005 	 * payload over this limit will not be copied.)
2006 	 */
2007 	tx_buf_alloc_size = (hxge_bcopy_thresh * hxge_tx_ring_size);
2008 
2009 	/*
2010 	 * The addresses of the transmit descriptor ring and the mailbox
2011 	 * must all be cache-aligned (64 bytes).
2012 	 */
2013 	tx_cntl_alloc_size = hxge_tx_ring_size;
2014 	tx_cntl_alloc_size *= (sizeof (tx_desc_t));
2015 	tx_cntl_alloc_size += sizeof (txdma_mailbox_t);
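
	/*
	 * Worked example (hypothetical sizes): with hxge_tx_ring_size of
	 * 1024 descriptors and an 8-byte tx_desc_t, the ring itself needs
	 * 1024 * 8 = 8192 bytes, plus sizeof (txdma_mailbox_t) for the
	 * mailbox the hardware writes back to.
	 */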
2016 
2017 	num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
2018 	    KM_SLEEP);
2019 
2020 	/*
2021 	 * Allocate memory for transmit buffers and descriptor rings. Replace
2022 	 * allocation functions with interface functions provided by the
2023 	 * partition manager when it is available.
2024 	 *
2025 	 * Allocate memory for the transmit buffer pool.
2026 	 */
2027 	for (i = 0; i < ndmas; i++) {
2028 		num_chunks[i] = 0;
2029 		status = hxge_alloc_tx_buf_dma(hxgep, st_tdc, &dma_buf_p[i],
2030 		    tx_buf_alloc_size, hxge_bcopy_thresh, &num_chunks[i]);
2031 		if (status != HXGE_OK) {
2032 			break;
2033 		}
2034 		st_tdc++;
2035 	}
2036 
2037 	if (i < ndmas) {
2038 		goto hxge_alloc_tx_mem_pool_fail1;
2039 	}
2040 
2041 	st_tdc = p_cfgp->start_tdc;
2042 
2043 	/*
2044 	 * Allocate memory for descriptor rings and mailbox.
2045 	 */
2046 	for (j = 0; j < ndmas; j++) {
2047 		status = hxge_alloc_tx_cntl_dma(hxgep, st_tdc, &dma_cntl_p[j],
2048 		    tx_cntl_alloc_size);
2049 		if (status != HXGE_OK) {
2050 			break;
2051 		}
2052 		st_tdc++;
2053 	}
2054 
2055 	if (j < ndmas) {
2056 		goto hxge_alloc_tx_mem_pool_fail2;
2057 	}
2058 
2059 	dma_poolp->ndmas = ndmas;
2060 	dma_poolp->num_chunks = num_chunks;
2061 	dma_poolp->buf_allocated = B_TRUE;
2062 	dma_poolp->dma_buf_pool_p = dma_buf_p;
2063 	hxgep->tx_buf_pool_p = dma_poolp;
2064 
2065 	dma_cntl_poolp->ndmas = ndmas;
2066 	dma_cntl_poolp->buf_allocated = B_TRUE;
2067 	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2068 	hxgep->tx_cntl_pool_p = dma_cntl_poolp;
2069 
2070 	HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2071 	    "==> hxge_alloc_tx_mem_pool: start_tdc %d "
2072 	    "ndmas %d poolp->ndmas %d", st_tdc, ndmas, dma_poolp->ndmas));
2073 
2074 	goto hxge_alloc_tx_mem_pool_exit;
2075 
2076 hxge_alloc_tx_mem_pool_fail2:
2077 	/* Free control buffers */
2078 	j--;
2079 	for (; j >= 0; j--) {
2080 		hxge_free_tx_cntl_dma(hxgep,
2081 		    (p_hxge_dma_common_t)dma_cntl_p[j]);
2082 	}
2083 
2084 hxge_alloc_tx_mem_pool_fail1:
2085 	/* Free data buffers */
2086 	i--;
2087 	for (; i >= 0; i--) {
2088 		hxge_free_tx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
2089 		    num_chunks[i]);
2090 	}
2091 
2092 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2093 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2094 	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2095 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2096 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2097 
2098 hxge_alloc_tx_mem_pool_exit:
2099 	HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2100 	    "<== hxge_alloc_tx_mem_pool:status 0x%08x", status));
2101 
2102 	return (status);
2103 }
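
/*
 * Illustrative sketch (not compiled): the partial-failure unwind used
 * above.  If allocation i of n fails, only entries [0, i) exist, so
 * the fail path decrements the index first and then frees backwards.
 * alloc_one() and free_one() are hypothetical stand-ins.
 */
#if 0
static int
hxge_unwind_sketch(int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (alloc_one(i) != 0)
			break;
	}
	if (i < n) {
		for (i--; i >= 0; i--)
			free_one(i);
		return (-1);
	}
	return (0);
}
#endif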
2104 
2105 static hxge_status_t
2106 hxge_alloc_tx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
2107     p_hxge_dma_common_t *dmap, size_t alloc_size,
2108     size_t block_size, uint32_t *num_chunks)
2109 {
2110 	p_hxge_dma_common_t	tx_dmap;
2111 	hxge_status_t		status = HXGE_OK;
2112 	size_t			total_alloc_size;
2113 	size_t			allocated = 0;
2114 	int			i, size_index, array_size;
2115 
2116 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_buf_dma"));
2117 
2118 	tx_dmap = (p_hxge_dma_common_t)
2119 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
2120 
2121 	total_alloc_size = alloc_size;
2122 	i = 0;
2123 	size_index = 0;
2124 	array_size = sizeof (alloc_sizes) / sizeof (size_t);
2125 	while ((size_index < array_size) &&
2126 	    (alloc_sizes[size_index] < alloc_size))
2127 		size_index++;
2128 	if (size_index >= array_size) {
2129 		size_index = array_size - 1;
2130 	}
2131 
2132 	while ((allocated < total_alloc_size) &&
2133 	    (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
2134 		tx_dmap[i].dma_chunk_index = i;
2135 		tx_dmap[i].block_size = block_size;
2136 		tx_dmap[i].alength = alloc_sizes[size_index];
2137 		tx_dmap[i].orig_alength = tx_dmap[i].alength;
2138 		tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2139 		tx_dmap[i].dma_channel = dma_channel;
2140 		tx_dmap[i].contig_alloc_type = B_FALSE;
2141 
2142 		status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2143 		    &hxge_tx_dma_attr, tx_dmap[i].alength,
2144 		    &hxge_dev_buf_dma_acc_attr,
2145 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
2146 		    (p_hxge_dma_common_t)(&tx_dmap[i]));
2147 		if (status != HXGE_OK) {
2148 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2149 			    " hxge_alloc_tx_buf_dma: Alloc Failed: "
2150 			    " for size: %d", alloc_sizes[size_index]));
2151 			size_index--;
2152 		} else {
2153 			i++;
2154 			allocated += alloc_sizes[size_index];
2155 		}
2156 	}
2157 
2158 	if (allocated < total_alloc_size) {
2159 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2160 		    " hxge_alloc_tx_buf_dma: failed due to"
2161 		    " allocated(%d) < required(%d)",
2162 		    allocated, total_alloc_size));
2163 		goto hxge_alloc_tx_mem_fail1;
2164 	}
2165 
2166 	*num_chunks = i;
2167 	*dmap = tx_dmap;
2168 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2169 	    "==> hxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
2170 	    *dmap, i));
2171 	goto hxge_alloc_tx_mem_exit;
2172 
2173 hxge_alloc_tx_mem_fail1:
2174 	KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2175 
2176 hxge_alloc_tx_mem_exit:
2177 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2178 	    "<== hxge_alloc_tx_buf_dma status 0x%08x", status));
2179 
2180 	return (status);
2181 }
2182 
2183 /*ARGSUSED*/
2184 static void
2185 hxge_free_tx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
2186     uint32_t num_chunks)
2187 {
2188 	int i;
2189 
2190 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_tx_buf_dma"));
2191 
2192 	for (i = 0; i < num_chunks; i++) {
2193 		hxge_dma_mem_free(dmap++);
2194 	}
2195 
2196 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_tx_buf_dma"));
2197 }
2198 
2199 /*ARGSUSED*/
2200 static hxge_status_t
2201 hxge_alloc_tx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
2202     p_hxge_dma_common_t *dmap, size_t size)
2203 {
2204 	p_hxge_dma_common_t	tx_dmap;
2205 	hxge_status_t		status = HXGE_OK;
2206 
2207 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_cntl_dma"));
2208 
2209 	tx_dmap = (p_hxge_dma_common_t)KMEM_ZALLOC(sizeof (hxge_dma_common_t),
2210 	    KM_SLEEP);
2211 
2212 	tx_dmap->contig_alloc_type = B_FALSE;
2213 
2214 	status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2215 	    &hxge_tx_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr,
2216 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, tx_dmap);
2217 	if (status != HXGE_OK) {
2218 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2219 		    " hxge_alloc_tx_cntl_dma: Alloc Failed: "
2220 		    " for size: %d", size));
2221 		goto hxge_alloc_tx_cntl_dma_fail1;
2222 	}
2223 
2224 	*dmap = tx_dmap;
2225 
2226 	goto hxge_alloc_tx_cntl_dma_exit;
2227 
2228 hxge_alloc_tx_cntl_dma_fail1:
2229 	KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t));
2230 
2231 hxge_alloc_tx_cntl_dma_exit:
2232 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2233 	    "<== hxge_alloc_tx_cntl_dma status 0x%08x", status));
2234 
2235 	return (status);
2236 }
2237 
2238 /*ARGSUSED*/
2239 static void
2240 hxge_free_tx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
2241 {
2242 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_tx_cntl_dma"));
2243 
2244 	hxge_dma_mem_free(dmap);
2245 
2246 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_tx_cntl_dma"));
2247 }
2248 
2249 static void
2250 hxge_free_tx_mem_pool(p_hxge_t hxgep)
2251 {
2252 	uint32_t		i, ndmas;
2253 	p_hxge_dma_pool_t	dma_poolp;
2254 	p_hxge_dma_common_t	*dma_buf_p;
2255 	p_hxge_dma_pool_t	dma_cntl_poolp;
2256 	p_hxge_dma_common_t	*dma_cntl_p;
2257 	uint32_t		*num_chunks;
2258 
2259 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_free_tx_mem_pool"));
2260 
2261 	dma_poolp = hxgep->tx_buf_pool_p;
2262 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
2263 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2264 		    "<== hxge_free_tx_mem_pool "
2265 		    "(null tx buf pool or buf not allocated)"));
2266 		return;
2267 	}
2268 
2269 	dma_cntl_poolp = hxgep->tx_cntl_pool_p;
2270 	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
2271 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2272 		    "<== hxge_free_tx_mem_pool "
2273 		    "(null tx cntl buf pool or cntl buf not allocated)"));
2274 		return;
2275 	}
2276 
2277 	dma_buf_p = dma_poolp->dma_buf_pool_p;
2278 	num_chunks = dma_poolp->num_chunks;
2279 
2280 	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
2281 	ndmas = dma_cntl_poolp->ndmas;
2282 
2283 	for (i = 0; i < ndmas; i++) {
2284 		hxge_free_tx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
2285 	}
2286 
2287 	for (i = 0; i < ndmas; i++) {
2288 		hxge_free_tx_cntl_dma(hxgep, dma_cntl_p[i]);
2289 	}
2290 
2291 	for (i = 0; i < ndmas; i++) {
2292 		KMEM_FREE(dma_buf_p[i],
2293 		    sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2294 		KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t));
2295 	}
2296 
2297 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2298 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2299 	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2300 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2301 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2302 
2303 	hxgep->tx_buf_pool_p = NULL;
2304 	hxgep->tx_cntl_pool_p = NULL;
2305 
2306 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_free_tx_mem_pool"));
2307 }
2308 
2309 /*ARGSUSED*/
2310 static hxge_status_t
2311 hxge_dma_mem_alloc(p_hxge_t hxgep, dma_method_t method,
2312     struct ddi_dma_attr *dma_attrp,
2313     size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
2314     p_hxge_dma_common_t dma_p)
2315 {
2316 	caddr_t		kaddrp;
2317 	int		ddi_status = DDI_SUCCESS;
2318 
2319 	dma_p->dma_handle = NULL;
2320 	dma_p->acc_handle = NULL;
2321 	dma_p->kaddrp = NULL;
2322 
2323 	ddi_status = ddi_dma_alloc_handle(hxgep->dip, dma_attrp,
2324 	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
2325 	if (ddi_status != DDI_SUCCESS) {
2326 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2327 		    "hxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
2328 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2329 	}
2330 
2331 	ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, acc_attr_p,
2332 	    xfer_flags, DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
2333 	    &dma_p->acc_handle);
2334 	if (ddi_status != DDI_SUCCESS) {
2335 		/* The caller will decide whether it is fatal */
2336 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2337 		    "hxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
2338 		ddi_dma_free_handle(&dma_p->dma_handle);
2339 		dma_p->dma_handle = NULL;
2340 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2341 	}
2342 
2343 	if (dma_p->alength < length) {
2344 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2345 		    "hxge_dma_mem_alloc:ddi_dma_mem_alloc < length."));
2346 		ddi_dma_mem_free(&dma_p->acc_handle);
2347 		ddi_dma_free_handle(&dma_p->dma_handle);
2348 		dma_p->acc_handle = NULL;
2349 		dma_p->dma_handle = NULL;
2350 		return (HXGE_ERROR);
2351 	}
2352 
2353 	ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2354 	    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2355 	    &dma_p->dma_cookie, &dma_p->ncookies);
2356 	if (ddi_status != DDI_DMA_MAPPED) {
2357 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2358 		    "hxge_dma_mem_alloc:ddi_dma_addr_bind failed "
2359 		    "(status 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2360 		if (dma_p->acc_handle) {
2361 			ddi_dma_mem_free(&dma_p->acc_handle);
2362 			dma_p->acc_handle = NULL;
2363 		}
2364 		ddi_dma_free_handle(&dma_p->dma_handle);
2365 		dma_p->dma_handle = NULL;
2366 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2367 	}
2368 
2369 	if (dma_p->ncookies != 1) {
2370 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2371 		    "hxge_dma_mem_alloc:ddi_dma_addr_bind > 1 cookie "
2372 		    "(status 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2373 		if (dma_p->acc_handle) {
2374 			ddi_dma_mem_free(&dma_p->acc_handle);
2375 			dma_p->acc_handle = NULL;
2376 		}
2377 		(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2378 		ddi_dma_free_handle(&dma_p->dma_handle);
2379 		dma_p->dma_handle = NULL;
2380 		return (HXGE_ERROR);
2381 	}
2382 
2383 	dma_p->kaddrp = kaddrp;
2384 #if defined(__i386)
2385 	dma_p->ioaddr_pp =
2386 	    (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
2387 #else
2388 	dma_p->ioaddr_pp = (unsigned char *) dma_p->dma_cookie.dmac_laddress;
2389 #endif
2390 
2391 	HPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
2392 
2393 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_dma_mem_alloc: "
2394 	    "dma buffer allocated: dma_p $%p "
2395 	    "return dmac_laddress from cookie $%p dmac_size %d "
2396 	    "dma_p->ioaddr_p $%p "
2397 	    "dma_p->orig_ioaddr_p $%p "
2398 	    "orig_vatopa $%p "
2399 	    "alength %d (0x%x) "
2400 	    "kaddrp $%p "
2401 	    "length %d (0x%x)",
2402 	    dma_p,
2403 	    dma_p->dma_cookie.dmac_laddress,
2404 	    dma_p->dma_cookie.dmac_size,
2405 	    dma_p->ioaddr_pp,
2406 	    dma_p->orig_ioaddr_pp,
2407 	    dma_p->orig_vatopa,
2408 	    dma_p->alength, dma_p->alength,
2409 	    kaddrp,
2410 	    length, length));
2411 
2412 	return (HXGE_OK);
2413 }
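
/*
 * Minimal usage sketch (not compiled) for the helper above: one
 * consistent-mode allocation, used and then torn down.  The attribute
 * structures are the driver's own; the 4096-byte length is arbitrary.
 */
#if 0
static hxge_status_t
hxge_dma_usage_sketch(p_hxge_t hxgep)
{
	hxge_dma_common_t	dma;
	hxge_status_t		status;

	status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
	    &hxge_rx_dma_attr, 4096, &hxge_dev_desc_dma_acc_attr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &dma);
	if (status != HXGE_OK)
		return (status);

	/* dma.kaddrp is the kernel mapping; dma.ioaddr_pp the device view */

	hxge_dma_mem_free(&dma);	/* unbind, free memory, free handle */
	return (HXGE_OK);
}
#endif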
2414 
2415 static void
2416 hxge_dma_mem_free(p_hxge_dma_common_t dma_p)
2417 {
2418 	if (dma_p == NULL)
2419 		return;
2420 
2421 	if (dma_p->dma_handle != NULL) {
2422 		if (dma_p->ncookies) {
2423 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2424 			dma_p->ncookies = 0;
2425 		}
2426 		ddi_dma_free_handle(&dma_p->dma_handle);
2427 		dma_p->dma_handle = NULL;
2428 	}
2429 
2430 	if (dma_p->acc_handle != NULL) {
2431 		ddi_dma_mem_free(&dma_p->acc_handle);
2432 		dma_p->acc_handle = NULL;
2433 		HPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
2434 	}
2435 
2436 	dma_p->kaddrp = NULL;
2437 	dma_p->alength = 0;
2438 }
2439 
2440 /*
2441  *	hxge_m_start() -- start transmitting and receiving.
2442  *
2443  *	This function is called by the MAC layer when the first
2444  *	stream is opened, to prepare the hardware for sending
2445  *	and receiving packets.
2446  */
2447 static int
2448 hxge_m_start(void *arg)
2449 {
2450 	p_hxge_t hxgep = (p_hxge_t)arg;
2451 
2452 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_start"));
2453 
2454 	MUTEX_ENTER(hxgep->genlock);
2455 
2456 	if (hxge_init(hxgep) != DDI_SUCCESS) {
2457 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2458 		    "<== hxge_m_start: initialization failed"));
2459 		MUTEX_EXIT(hxgep->genlock);
2460 		return (EIO);
2461 	}
2462 
2463 	if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) {
2464 		/*
2465 		 * Start timer to check the system error and tx hangs
2466 		 */
2467 		hxgep->hxge_timerid = hxge_start_timer(hxgep,
2468 		    hxge_check_hw_state, HXGE_CHECK_TIMER);
2469 
2470 		hxgep->hxge_mac_state = HXGE_MAC_STARTED;
2471 
2472 		hxgep->timeout.link_status = 0;
2473 		hxgep->timeout.report_link_status = B_TRUE;
2474 		hxgep->timeout.ticks = drv_usectohz(2 * 1000000);
2475 
2476 		/* Start the link status timer to check the link status */
2477 		MUTEX_ENTER(&hxgep->timeout.lock);
2478 		hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
2479 		    hxgep->timeout.ticks);
2480 		MUTEX_EXIT(&hxgep->timeout.lock);
2481 	}
2482 
2483 	MUTEX_EXIT(hxgep->genlock);
2484 
2485 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_start"));
2486 
2487 	return (0);
2488 }
2489 
2490 /*
2491  * hxge_m_stop(): stop transmitting and receiving.
2492  */
2493 static void
2494 hxge_m_stop(void *arg)
2495 {
2496 	p_hxge_t hxgep = (p_hxge_t)arg;
2497 
2498 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_stop"));
2499 
2500 	if (hxgep->hxge_timerid) {
2501 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
2502 		hxgep->hxge_timerid = 0;
2503 	}
2504 
2505 	/* Stop the link status timer before unregistering */
2506 	MUTEX_ENTER(&hxgep->timeout.lock);
2507 	if (hxgep->timeout.id) {
2508 		(void) untimeout(hxgep->timeout.id);
2509 		hxgep->timeout.id = 0;
2510 	}
2511 	hxge_link_update(hxgep, LINK_STATE_DOWN);
2512 	MUTEX_EXIT(&hxgep->timeout.lock);
2513 
2514 	MUTEX_ENTER(hxgep->genlock);
2515 
2516 	hxge_uninit(hxgep);
2517 
2518 	hxgep->hxge_mac_state = HXGE_MAC_STOPPED;
2519 
2520 	MUTEX_EXIT(hxgep->genlock);
2521 
2522 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_stop"));
2523 }
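
/*
 * Minimal sketch (not compiled) of the timeout(9F) pairing used by
 * hxge_m_start() and hxge_m_stop() above: arm a periodic callback
 * under the timeout lock, and cancel it with untimeout() on stop.
 */
#if 0
static void
hxge_link_timer_sketch(p_hxge_t hxgep)
{
	MUTEX_ENTER(&hxgep->timeout.lock);
	hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
	    drv_usectohz(2 * 1000000));		/* fire in ~2 seconds */
	MUTEX_EXIT(&hxgep->timeout.lock);

	/* ... later, on stop ... */
	MUTEX_ENTER(&hxgep->timeout.lock);
	if (hxgep->timeout.id) {
		(void) untimeout(hxgep->timeout.id);
		hxgep->timeout.id = 0;
	}
	MUTEX_EXIT(&hxgep->timeout.lock);
}
#endif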
2524 
2525 static int
2526 hxge_m_unicst(void *arg, const uint8_t *macaddr)
2527 {
2528 	p_hxge_t		hxgep = (p_hxge_t)arg;
2529 	struct ether_addr	addrp;
2530 	hxge_status_t		status;
2531 
2532 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_unicst"));
2533 
2534 	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
2535 
2536 	status = hxge_set_mac_addr(hxgep, &addrp);
2537 	if (status != HXGE_OK) {
2538 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2539 		    "<== hxge_m_unicst: set unicast failed"));
2540 		return (EINVAL);
2541 	}
2542 
2543 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_unicst"));
2544 
2545 	return (0);
2546 }
2547 
2548 static int
2549 hxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
2550 {
2551 	p_hxge_t		hxgep = (p_hxge_t)arg;
2552 	struct ether_addr	addrp;
2553 
2554 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_multicst: add %d", add));
2555 
2556 	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
2557 
2558 	if (add) {
2559 		if (hxge_add_mcast_addr(hxgep, &addrp)) {
2560 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2561 			    "<== hxge_m_multicst: add multicast failed"));
2562 			return (EINVAL);
2563 		}
2564 	} else {
2565 		if (hxge_del_mcast_addr(hxgep, &addrp)) {
2566 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2567 			    "<== hxge_m_multicst: del multicast failed"));
2568 			return (EINVAL);
2569 		}
2570 	}
2571 
2572 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_multicst"));
2573 
2574 	return (0);
2575 }
2576 
2577 static int
2578 hxge_m_promisc(void *arg, boolean_t on)
2579 {
2580 	p_hxge_t hxgep = (p_hxge_t)arg;
2581 
2582 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_promisc: on %d", on));
2583 
2584 	if (hxge_set_promisc(hxgep, on)) {
2585 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2586 		    "<== hxge_m_promisc: set promisc failed"));
2587 		return (EINVAL);
2588 	}
2589 
2590 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_promisc: on %d", on));
2591 
2592 	return (0);
2593 }
2594 
2595 static void
2596 hxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2597 {
2598 	p_hxge_t	hxgep = (p_hxge_t)arg;
2599 	struct iocblk	*iocp = (struct iocblk *)mp->b_rptr;
2600 	boolean_t	need_privilege;
2601 	int		err;
2602 	int		cmd;
2603 
2604 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl"));
2605 
2607 	iocp->ioc_error = 0;
2608 	need_privilege = B_TRUE;
2609 	cmd = iocp->ioc_cmd;
2610 
2611 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl: cmd 0x%08x", cmd));
2612 	switch (cmd) {
2613 	default:
2614 		miocnak(wq, mp, 0, EINVAL);
2615 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl: invalid"));
2616 		return;
2617 
2618 	case LB_GET_INFO_SIZE:
2619 	case LB_GET_INFO:
2620 	case LB_GET_MODE:
2621 		need_privilege = B_FALSE;
2622 		break;
2623 
2624 	case LB_SET_MODE:
2625 		break;
2626 
2627 	case ND_GET:
2628 		need_privilege = B_FALSE;
2629 		break;
2630 	case ND_SET:
2631 		break;
2632 
2633 	case HXGE_GET64:
2634 	case HXGE_PUT64:
2635 	case HXGE_GET_TX_RING_SZ:
2636 	case HXGE_GET_TX_DESC:
2637 	case HXGE_TX_SIDE_RESET:
2638 	case HXGE_RX_SIDE_RESET:
2639 	case HXGE_GLOBAL_RESET:
2640 	case HXGE_RESET_MAC:
2641 	case HXGE_PUT_TCAM:
2642 	case HXGE_GET_TCAM:
2643 	case HXGE_RTRACE:
2644 
2645 		need_privilege = B_FALSE;
2646 		break;
2647 	}
2648 
2649 	if (need_privilege) {
2650 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
2651 		if (err != 0) {
2652 			miocnak(wq, mp, 0, err);
2653 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2654 			    "<== hxge_m_ioctl: no priv"));
2655 			return;
2656 		}
2657 	}
2658 
2659 	switch (cmd) {
2660 	case ND_GET:
2661 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_GET command"));
		/* FALLTHROUGH */
2662 	case ND_SET:
2663 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_SET command"));
2664 		hxge_param_ioctl(hxgep, wq, mp, iocp);
2665 		break;
2666 
2667 	case LB_GET_MODE:
2668 	case LB_SET_MODE:
2669 	case LB_GET_INFO_SIZE:
2670 	case LB_GET_INFO:
2671 		hxge_loopback_ioctl(hxgep, wq, mp, iocp);
2672 		break;
2673 
2674 	case HXGE_PUT_TCAM:
2675 	case HXGE_GET_TCAM:
2676 	case HXGE_GET64:
2677 	case HXGE_PUT64:
2678 	case HXGE_GET_TX_RING_SZ:
2679 	case HXGE_GET_TX_DESC:
2680 	case HXGE_TX_SIDE_RESET:
2681 	case HXGE_RX_SIDE_RESET:
2682 	case HXGE_GLOBAL_RESET:
2683 	case HXGE_RESET_MAC:
2684 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL,
2685 		    "==> hxge_m_ioctl: cmd 0x%x", cmd));
2686 		hxge_hw_ioctl(hxgep, wq, mp, iocp);
2687 		break;
2688 	}
2689 
2690 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl"));
2691 }
2692 
2693 extern void hxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);
2694 
2695 static void
2696 hxge_m_resources(void *arg)
2697 {
2698 	p_hxge_t hxgep = arg;
2699 	mac_rx_fifo_t mrf;
2700 	p_rx_rcr_rings_t rcr_rings;
2701 	p_rx_rcr_ring_t *rcr_p;
2702 	p_rx_rcr_ring_t rcrp;
2703 	uint32_t i, ndmas;
2704 	int status;
2705 
2706 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_m_resources"));
2707 
2708 	MUTEX_ENTER(hxgep->genlock);
2709 
2710 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2711 		status = hxge_init(hxgep);
2712 		if (status != HXGE_OK) {
2713 			HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_m_resources: "
2714 			    "hxge_init failed"));
2715 			MUTEX_EXIT(hxgep->genlock);
2716 			return;
2717 		}
2718 	}
2719 
2720 	mrf.mrf_type = MAC_RX_FIFO;
2721 	mrf.mrf_blank = hxge_rx_hw_blank;
2722 	mrf.mrf_arg = (void *)hxgep;
2723 
2724 	mrf.mrf_normal_blank_time = RXDMA_RCR_TO_DEFAULT;
2725 	mrf.mrf_normal_pkt_count = RXDMA_RCR_PTHRES_DEFAULT;
2726 
2727 	rcr_rings = hxgep->rx_rcr_rings;
2728 	rcr_p = rcr_rings->rcr_rings;
2729 	ndmas = rcr_rings->ndmas;
2730 
2731 	/*
2732 	 * Export our receive resources to the MAC layer.
2733 	 */
2734 	for (i = 0; i < ndmas; i++) {
2735 		rcrp = (void *)(p_rx_rcr_ring_t)rcr_p[i];
2736 		rcrp->rcr_mac_handle =
2737 		    mac_resource_add(hxgep->mach, (mac_resource_t *)&mrf);
2738 
2739 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
2740 		    "==> hxge_m_resources: vdma %d dma %d "
2741 		    "rcrptr 0x%016llx mac_handle 0x%016llx",
2742 		    i, rcrp->rdc, rcr_p[i], rcrp->rcr_mac_handle));
2743 	}
2744 
2745 	MUTEX_EXIT(hxgep->genlock);
2746 
2747 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_m_resources"));
2748 }
2749 
2750 /*
2751  * Set an alternate MAC address
2752  */
2753 static int
2754 hxge_altmac_set(p_hxge_t hxgep, uint8_t *maddr, mac_addr_slot_t slot)
2755 {
2756 	uint64_t	address;
2757 	uint64_t	tmp;
2758 	hpi_status_t	status;
2759 	uint8_t		addrn;
2760 	int		i;
2761 
2762 	/*
2763 	 * Convert a byte array to a 48-bit value.
2764 	 * Check endianness if in doubt.
2765 	 */
2766 	address = 0;
2767 	for (i = 0; i < ETHERADDRL; i++) {
2768 		tmp = maddr[i];
2769 		address <<= 8;
2770 		address |= tmp;
2771 	}
2772 
2773 	addrn = (uint8_t)slot;
2774 	status = hpi_pfc_set_mac_address(hxgep->hpi_handle, addrn, address);
2775 	if (status != HPI_SUCCESS)
2776 		return (EIO);
2777 
2778 	return (0);
2779 }
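
/*
 * Worked example for the conversion above: a (hypothetical) address
 * 00:14:4f:a8:3b:01 is packed most-significant byte first, yielding
 * address = 0x00144fa83b01, the 48-bit value the PFC register
 * interface expects.
 */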
2780 
2781 static void
2782 hxge_mmac_kstat_update(p_hxge_t hxgep, mac_addr_slot_t slot)
2783 {
2784 	p_hxge_mmac_stats_t	mmac_stats;
2785 	int			i;
2786 	hxge_mmac_t		*mmac_info;
2787 
2788 	mmac_info = &hxgep->hxge_mmac_info;
2789 	mmac_stats = &hxgep->statsp->mmac_stats;
2790 	mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
2791 	mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;
2792 
2793 	for (i = 0; i < ETHERADDRL; i++) {
2794 		mmac_stats->mmac_avail_pool[slot].ether_addr_octet[i] =
2795 		    mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i];
2796 	}
2797 }
2798 
2799 /*
2800  * Find an unused address slot, set the address value to the one specified,
2801  * enable the port to start filtering on the new MAC address.
2802  * Returns: 0 on success.
2803  */
2804 int
2805 hxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr)
2806 {
2807 	p_hxge_t	hxgep = arg;
2808 	mac_addr_slot_t	slot;
2809 	hxge_mmac_t	*mmac_info;
2810 	int		err;
2811 	hxge_status_t	status;
2812 
2813 	mutex_enter(hxgep->genlock);
2814 
2815 	/*
2816 	 * Make sure that hxge is initialized if _start() has
2817 	 * not been called yet.
2818 	 */
2819 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2820 		status = hxge_init(hxgep);
2821 		if (status != HXGE_OK) {
2822 			mutex_exit(hxgep->genlock);
2823 			return (ENXIO);
2824 		}
2825 	}
2826 
2827 	mmac_info = &hxgep->hxge_mmac_info;
2828 	if (mmac_info->naddrfree == 0) {
2829 		mutex_exit(hxgep->genlock);
2830 		return (ENOSPC);
2831 	}
2832 
2833 	if (!mac_unicst_verify(hxgep->mach, maddr->mma_addr,
2834 	    maddr->mma_addrlen)) {
2835 		mutex_exit(hxgep->genlock);
2836 		return (EINVAL);
2837 	}
2838 
2839 	/*
2840 	 * Search for the first available slot. Because naddrfree
2841 	 * is not zero, we are guaranteed to find one.
2842 	 * Slot 0 is for unique (primary) MAC.  The first alternate
2843 	 * MAC slot is slot 1.
2844 	 */
2845 	for (slot = 1; slot < mmac_info->num_mmac; slot++) {
2846 		if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
2847 			break;
2848 	}
2849 
2850 	ASSERT(slot < mmac_info->num_mmac);
2851 	if ((err = hxge_altmac_set(hxgep, maddr->mma_addr, slot)) != 0) {
2852 		mutex_exit(hxgep->genlock);
2853 		return (err);
2854 	}
2855 	bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
2856 	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
2857 	mmac_info->naddrfree--;
2858 	hxge_mmac_kstat_update(hxgep, slot);
2859 
2860 	maddr->mma_slot = slot;
2861 
2862 	mutex_exit(hxgep->genlock);
2863 	return (0);
2864 }
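
/*
 * Usage sketch (not compiled): how a consumer of the multi-MAC
 * capability might add and later remove an alternate address.  The
 * address value here is hypothetical.
 */
#if 0
static int
hxge_mmac_usage_sketch(p_hxge_t hxgep)
{
	mac_multi_addr_t	maddr;
	uint8_t			alt[ETHERADDRL] = {
		0x00, 0x14, 0x4f, 0xa8, 0x3b, 0x02 };
	int			err;

	bcopy(alt, maddr.mma_addr, ETHERADDRL);
	maddr.mma_addrlen = ETHERADDRL;

	err = hxge_m_mmac_add(hxgep, &maddr);
	if (err != 0)
		return (err);

	/* maddr.mma_slot now identifies the slot filtering this address */
	return (hxge_m_mmac_remove(hxgep, maddr.mma_slot));
}
#endif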
2865 
2866 /*
2867  * Remove the specified MAC address and update
2868  * the hardware so it no longer filters on that address.
2869  * Returns: 0 on success.
2870  */
2871 int
2872 hxge_m_mmac_remove(void *arg, mac_addr_slot_t slot)
2873 {
2874 	p_hxge_t	hxgep = arg;
2875 	hxge_mmac_t	*mmac_info;
2876 	int		err = 0;
2877 	hxge_status_t	status;
2878 
2879 	mutex_enter(hxgep->genlock);
2880 
2881 	/*
2882 	 * Make sure that hxge is initialized if _start() has
2883 	 * not been called yet.
2884 	 */
2885 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2886 		status = hxge_init(hxgep);
2887 		if (status != HXGE_OK) {
2888 			mutex_exit(hxgep->genlock);
2889 			return (ENXIO);
2890 		}
2891 	}
2892 
2893 	mmac_info = &hxgep->hxge_mmac_info;
2894 	if (slot <= 0 || slot >= mmac_info->num_mmac) {
2895 		mutex_exit(hxgep->genlock);
2896 		return (EINVAL);
2897 	}
2898 
2899 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
2900 		if (hpi_pfc_mac_addr_disable(hxgep->hpi_handle, slot) ==
2901 		    HPI_SUCCESS) {
2902 			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
2903 			mmac_info->naddrfree++;
2904 			/*
2905 			 * Clear mac_pool[slot].addr so that kstat shows 0
2906 			 * alternate MAC address if the slot is not used.
2907 			 */
2908 			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
2909 			hxge_mmac_kstat_update(hxgep, slot);
2910 		} else {
2911 			err = EIO;
2912 		}
2913 	} else {
2914 		err = EINVAL;
2915 	}
2916 
2917 	mutex_exit(hxgep->genlock);
2918 	return (err);
2919 }
2920 
2921 /*
2922  * Modify a MAC address added by hxge_m_mmac_add().
2923  * Returns: 0 on success.
2924  */
2925 int
2926 hxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr)
2927 {
2928 	p_hxge_t	hxgep = arg;
2929 	mac_addr_slot_t	slot;
2930 	hxge_mmac_t	*mmac_info;
2931 	int		err = 0;
2932 	hxge_status_t	status;
2933 
2934 	if (!mac_unicst_verify(hxgep->mach, maddr->mma_addr,
2935 	    maddr->mma_addrlen))
2936 		return (EINVAL);
2937 
2938 	slot = maddr->mma_slot;
2939 
2940 	mutex_enter(hxgep->genlock);
2941 
2942 	/*
2943 	 * Make sure that hxge is initialized if _start() has
2944 	 * not been called yet.
2945 	 */
2946 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2947 		status = hxge_init(hxgep);
2948 		if (status != HXGE_OK) {
2949 			mutex_exit(hxgep->genlock);
2950 			return (ENXIO);
2951 		}
2952 	}
2953 
2954 	mmac_info = &hxgep->hxge_mmac_info;
2955 	if (slot <= 0 || slot >= mmac_info->num_mmac) {
2956 		mutex_exit(hxgep->genlock);
2957 		return (EINVAL);
2958 	}
2959 
2960 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
2961 		if ((err = hxge_altmac_set(hxgep, maddr->mma_addr,
2962 		    slot)) == 0) {
2963 			bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr,
2964 			    ETHERADDRL);
2965 			hxge_mmac_kstat_update(hxgep, slot);
2966 		}
2967 	} else {
2968 		err = EINVAL;
2969 	}
2970 
2971 	mutex_exit(hxgep->genlock);
2972 	return (err);
2973 }
2974 
2975 /*
2976  * int
2977  * hxge_m_mmac_get() - Get the MAC address and other information
2978  *	related to the slot.  mma_flags should be set to 0 in the call.
2979  *	Note: although kstat shows MAC address as zero when a slot is
2980  *	not used, Crossbow expects hxge_m_mmac_get to copy factory MAC
2981  *	to the caller as long as the slot is not using a user MAC address.
2982  *	The following table shows the rules,
2983  *
2984  *     					USED    VENDOR    mma_addr
2985  *	------------------------------------------------------------
2986  *	(1) Slot uses a user MAC:	yes      no     user MAC
2987  *	(2) Slot uses a factory MAC:    yes      yes    factory MAC
2988  *	(3) Slot is not used but is
2989  *	     factory MAC capable:	no       yes    factory MAC
2990  *	(4) Slot is not used and is
2991  *	     not factory MAC capable:   no       no	0
2992  *	------------------------------------------------------------
2993  */
2994 int
2995 hxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr)
2996 {
2997 	hxge_t		*hxgep = arg;
2998 	mac_addr_slot_t	slot;
2999 	hxge_mmac_t	*mmac_info;
3000 	hxge_status_t	status;
3001 
3002 	slot = maddr->mma_slot;
3003 
3004 	mutex_enter(hxgep->genlock);
3005 
3006 	/*
3007 	 * Make sure that hxge is initialized if _start() has
3008 	 * not been called yet.
3009 	 */
3010 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
3011 		status = hxge_init(hxgep);
3012 		if (status != HXGE_OK) {
3013 			mutex_exit(hxgep->genlock);
3014 			return (ENXIO);
3015 		}
3016 	}
3017 
3018 	mmac_info = &hxgep->hxge_mmac_info;
3019 	if (slot <= 0 || slot >= mmac_info->num_mmac) {
3020 		mutex_exit(hxgep->genlock);
3021 		return (EINVAL);
3022 	}
3023 
3024 	maddr->mma_flags = 0;
3025 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
3026 		maddr->mma_flags |= MMAC_SLOT_USED;
3027 		bcopy(mmac_info->mac_pool[slot].addr,
3028 		    maddr->mma_addr, ETHERADDRL);
3029 		maddr->mma_addrlen = ETHERADDRL;
3030 	}
3031 
3032 	mutex_exit(hxgep->genlock);
3033 	return (0);
3034 }
3035 
3036 /*ARGSUSED*/
3037 boolean_t
3038 hxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
3039 {
3040 	p_hxge_t		hxgep = (p_hxge_t)arg;
3041 	uint32_t		*txflags = cap_data;
3042 	multiaddress_capab_t	*mmacp = cap_data;
3043 
3044 	switch (cap) {
3045 	case MAC_CAPAB_HCKSUM:
3046 		*txflags = HCKSUM_INET_PARTIAL;
3047 		break;
3048 
3049 	case MAC_CAPAB_POLL:
3050 		/*
3051 		 * There's nothing for us to fill in, simply returning B_TRUE
3052 		 * stating that we support polling is sufficient.
3053 		 */
3054 		break;
3055 
3056 	case MAC_CAPAB_MULTIADDRESS:
3057 		/*
3058 		 * The number of MAC addresses made available by
3059 		 * this capability is one less than the total as
3060 		 * the primary address in slot 0 is counted in
3061 		 * the total.
3062 		 */
3063 		mmacp->maddr_naddr = PFC_N_MAC_ADDRESSES - 1;
3064 		mmacp->maddr_naddrfree = hxgep->hxge_mmac_info.naddrfree;
3065 		mmacp->maddr_flag = 0;	/* No multiple factory macs */
3066 		mmacp->maddr_handle = hxgep;
3067 		mmacp->maddr_add = hxge_m_mmac_add;
3068 		mmacp->maddr_remove = hxge_m_mmac_remove;
3069 		mmacp->maddr_modify = hxge_m_mmac_modify;
3070 		mmacp->maddr_get = hxge_m_mmac_get;
3071 		mmacp->maddr_reserve = NULL;	/* No multiple factory macs */
3072 		break;
3073 	default:
3074 		return (B_FALSE);
3075 	}
3076 	return (B_TRUE);
3077 }
3078 
3079 static boolean_t
3080 hxge_param_locked(mac_prop_id_t pr_num)
3081 {
3082 	/*
3083 	 * All adv_* parameters are locked (read-only) while
3084 	 * the device is in any sort of loopback mode ...
3085 	 */
3086 	switch (pr_num) {
3087 		case MAC_PROP_ADV_1000FDX_CAP:
3088 		case MAC_PROP_EN_1000FDX_CAP:
3089 		case MAC_PROP_ADV_1000HDX_CAP:
3090 		case MAC_PROP_EN_1000HDX_CAP:
3091 		case MAC_PROP_ADV_100FDX_CAP:
3092 		case MAC_PROP_EN_100FDX_CAP:
3093 		case MAC_PROP_ADV_100HDX_CAP:
3094 		case MAC_PROP_EN_100HDX_CAP:
3095 		case MAC_PROP_ADV_10FDX_CAP:
3096 		case MAC_PROP_EN_10FDX_CAP:
3097 		case MAC_PROP_ADV_10HDX_CAP:
3098 		case MAC_PROP_EN_10HDX_CAP:
3099 		case MAC_PROP_AUTONEG:
3100 		case MAC_PROP_FLOWCTRL:
3101 			return (B_TRUE);
3102 	}
3103 	return (B_FALSE);
3104 }
3105 
3106 /*
3107  * callback functions for set/get of properties
3108  */
3109 static int
3110 hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3111     uint_t pr_valsize, const void *pr_val)
3112 {
3113 	hxge_t		*hxgep = barg;
3114 	p_hxge_stats_t	statsp;
3115 	int		err = 0;
3116 	uint32_t	new_mtu, old_framesize, new_framesize;
3117 
3118 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "==> hxge_m_setprop"));
3119 
3120 	statsp = hxgep->statsp;
3121 	mutex_enter(hxgep->genlock);
3122 	if (statsp->port_stats.lb_mode != hxge_lb_normal &&
3123 	    hxge_param_locked(pr_num)) {
3124 		/*
3125 		 * All adv_* parameters are locked (read-only)
3126 		 * while the device is in any sort of loopback mode.
3127 		 */
3128 		HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3129 		    "==> hxge_m_setprop: loopback mode: read only"));
3130 		mutex_exit(hxgep->genlock);
3131 		return (EBUSY);
3132 	}
3133 
3134 	switch (pr_num) {
3135 		/*
3136 		 * These properties either do not exist or are read-only.
3137 		 */
3138 		case MAC_PROP_EN_1000FDX_CAP:
3139 		case MAC_PROP_EN_100FDX_CAP:
3140 		case MAC_PROP_EN_10FDX_CAP:
3141 		case MAC_PROP_EN_1000HDX_CAP:
3142 		case MAC_PROP_EN_100HDX_CAP:
3143 		case MAC_PROP_EN_10HDX_CAP:
3144 		case MAC_PROP_ADV_1000FDX_CAP:
3145 		case MAC_PROP_ADV_1000HDX_CAP:
3146 		case MAC_PROP_ADV_100FDX_CAP:
3147 		case MAC_PROP_ADV_100HDX_CAP:
3148 		case MAC_PROP_ADV_10FDX_CAP:
3149 		case MAC_PROP_ADV_10HDX_CAP:
3150 		case MAC_PROP_STATUS:
3151 		case MAC_PROP_SPEED:
3152 		case MAC_PROP_DUPLEX:
3153 		case MAC_PROP_AUTONEG:
3154 		/*
3155 		 * Flow control is handled in the shared domain and
3156 		 * it is read-only here.
3157 		 */
3158 		case MAC_PROP_FLOWCTRL:
3159 			err = EINVAL;
3160 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3161 			    "==> hxge_m_setprop:  read only property %d",
3162 			    pr_num));
3163 			break;
3164 
3165 		case MAC_PROP_MTU:
3166 			bcopy(pr_val, &new_mtu, sizeof (new_mtu));
3167 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3168 			    "==> hxge_m_setprop: set MTU: %d", new_mtu));
3169 
3170 			new_framesize = new_mtu + MTU_TO_FRAME_SIZE;
3171 			if (new_framesize == hxgep->vmac.maxframesize) {
3172 				err = 0;
3173 				break;
3174 			}
3175 
3176 			if (hxgep->hxge_mac_state == HXGE_MAC_STARTED) {
3177 				err = EBUSY;
3178 				break;
3179 			}
3180 
3181 			if (new_framesize < MIN_FRAME_SIZE ||
3182 			    new_framesize > MAX_FRAME_SIZE) {
3183 				err = EINVAL;
3184 				break;
3185 			}
3186 
3187 			old_framesize = hxgep->vmac.maxframesize;
3188 			hxgep->vmac.maxframesize = (uint16_t)new_framesize;
3189 
3190 			if (hxge_vmac_set_framesize(hxgep)) {
3191 				hxgep->vmac.maxframesize =
3192 				    (uint16_t)old_framesize;
3193 				err = EINVAL;
3194 				break;
3195 			}
3196 
3197 			err = mac_maxsdu_update(hxgep->mach, new_mtu);
3198 			if (err) {
3199 				hxgep->vmac.maxframesize =
3200 				    (uint16_t)old_framesize;
3201 				(void) hxge_vmac_set_framesize(hxgep);
3202 			}
3203 
3204 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3205 			    "==> hxge_m_setprop: set MTU: %d maxframe %d",
3206 			    new_mtu, hxgep->vmac.maxframesize));
3207 			break;
3208 
3209 		case MAC_PROP_PRIVATE:
3210 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3211 			    "==> hxge_m_setprop: private property"));
3212 			err = hxge_set_priv_prop(hxgep, pr_name, pr_valsize,
3213 			    pr_val);
3214 			break;
3215 
3216 		default:
3217 			err = ENOTSUP;
3218 			break;
3219 	}
3220 
3221 	mutex_exit(hxgep->genlock);
3222 
3223 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3224 	    "<== hxge_m_setprop (return %d)", err));
3225 
3226 	return (err);
3227 }
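
/*
 * Worked example for the MTU case above (assuming MTU_TO_FRAME_SIZE
 * covers the Ethernet header and CRC overhead): an MTU of 1500 maps
 * to a frame size of 1500 + MTU_TO_FRAME_SIZE, which must fall within
 * [MIN_FRAME_SIZE, MAX_FRAME_SIZE].  If mac_maxsdu_update() fails,
 * the old frame size is restored so the hardware and the MAC layer
 * stay consistent.
 */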
3228 
3229 /* ARGSUSED */
3230 static int
3231 hxge_get_def_val(hxge_t *hxgep, mac_prop_id_t pr_num, uint_t pr_valsize,
3232     void *pr_val)
3233 {
3234 	int		err = 0;
3235 	link_flowctrl_t	fl;
3236 
3237 	switch (pr_num) {
3238 	case MAC_PROP_DUPLEX:
3239 		*(uint8_t *)pr_val = 2;
3240 		break;
3241 	case MAC_PROP_AUTONEG:
3242 		*(uint8_t *)pr_val = 0;
3243 		break;
3244 	case MAC_PROP_FLOWCTRL:
3245 		if (pr_valsize < sizeof (link_flowctrl_t))
3246 			return (EINVAL);
3247 		fl = LINK_FLOWCTRL_TX;
3248 		bcopy(&fl, pr_val, sizeof (fl));
3249 		break;
3250 	default:
3251 		err = ENOTSUP;
3252 		break;
3253 	}
3254 	return (err);
3255 }
3256 
3257 static int
3258 hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3259     uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
3260 {
3261 	hxge_t 		*hxgep = barg;
3262 	p_hxge_stats_t	statsp = hxgep->statsp;
3263 	int		err = 0;
3264 	link_flowctrl_t fl;
3265 	uint64_t	tmp = 0;
3266 	link_state_t	ls;
3267 
3268 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3269 	    "==> hxge_m_getprop: pr_num %d", pr_num));
3270 
3271 	if (pr_valsize == 0)
3272 		return (EINVAL);
3273 
3274 	*perm = MAC_PROP_PERM_RW;
3275 
3276 	if ((pr_flags & MAC_PROP_DEFAULT) && (pr_num != MAC_PROP_PRIVATE)) {
3277 		err = hxge_get_def_val(hxgep, pr_num, pr_valsize, pr_val);
3278 		return (err);
3279 	}
3280 
3281 	bzero(pr_val, pr_valsize);
3282 	switch (pr_num) {
3283 		case MAC_PROP_DUPLEX:
3284 			*perm = MAC_PROP_PERM_READ;
3285 			*(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
3286 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3287 			    "==> hxge_m_getprop: duplex mode %d",
3288 			    *(uint8_t *)pr_val));
3289 			break;
3290 
3291 		case MAC_PROP_SPEED:
3292 			*perm = MAC_PROP_PERM_READ;
3293 			if (pr_valsize < sizeof (uint64_t))
3294 				return (EINVAL);
3295 			tmp = statsp->mac_stats.link_speed * 1000000ull;
3296 			bcopy(&tmp, pr_val, sizeof (tmp));
3297 			break;
3298 
3299 		case MAC_PROP_STATUS:
3300 			*perm = MAC_PROP_PERM_READ;
3301 			if (pr_valsize < sizeof (link_state_t))
3302 				return (EINVAL);
3303 			if (!statsp->mac_stats.link_up)
3304 				ls = LINK_STATE_DOWN;
3305 			else
3306 				ls = LINK_STATE_UP;
3307 			bcopy(&ls, pr_val, sizeof (ls));
3308 			break;
3309 
3310 		case MAC_PROP_FLOWCTRL:
3311 			/*
3312 			 * Flow control is supported by the shared domain and
3313 			 * it is currently transmit-only.
3314 			 */
3315 			*perm = MAC_PROP_PERM_READ;
3316 			if (pr_valsize < sizeof (link_flowctrl_t))
3317 				return (EINVAL);
3318 			fl = LINK_FLOWCTRL_TX;
3319 			bcopy(&fl, pr_val, sizeof (fl));
3320 			break;
3321 		case MAC_PROP_AUTONEG:
3322 			/* 10G link only and it is not negotiable */
3323 			*perm = MAC_PROP_PERM_READ;
3324 			*(uint8_t *)pr_val = 0;
3325 			break;
3326 		case MAC_PROP_ADV_1000FDX_CAP:
3327 		case MAC_PROP_ADV_100FDX_CAP:
3328 		case MAC_PROP_ADV_10FDX_CAP:
3329 		case MAC_PROP_ADV_1000HDX_CAP:
3330 		case MAC_PROP_ADV_100HDX_CAP:
3331 		case MAC_PROP_ADV_10HDX_CAP:
3332 		case MAC_PROP_EN_1000FDX_CAP:
3333 		case MAC_PROP_EN_100FDX_CAP:
3334 		case MAC_PROP_EN_10FDX_CAP:
3335 		case MAC_PROP_EN_1000HDX_CAP:
3336 		case MAC_PROP_EN_100HDX_CAP:
3337 		case MAC_PROP_EN_10HDX_CAP:
3338 			err = ENOTSUP;
3339 			break;
3340 
3341 		case MAC_PROP_PRIVATE:
3342 			err = hxge_get_priv_prop(hxgep, pr_name, pr_flags,
3343 			    pr_valsize, pr_val);
3344 			break;
3345 		default:
3346 			err = EINVAL;
3347 			break;
3348 	}
3349 
3350 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "<== hxge_m_getprop"));
3351 
3352 	return (err);
3353 }
3354 
3355 /* ARGSUSED */
3356 static int
3357 hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_valsize,
3358     const void *pr_val)
3359 {
3360 	p_hxge_param_t	param_arr = hxgep->param_arr;
3361 	int		err = 0;
3362 
3363 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3364 	    "==> hxge_set_priv_prop: name %s (value %s)", pr_name, pr_val));
3365 
3366 	if (pr_val == NULL) {
3367 		return (EINVAL);
3368 	}
3369 
3370 	/* Blanking */
3371 	if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3372 		err = hxge_param_rx_intr_time(hxgep, NULL, NULL,
3373 		    (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_time]);
3374 	} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3375 		err = hxge_param_rx_intr_pkts(hxgep, NULL, NULL,
3376 		    (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_pkts]);
3377 
3378 	/* Classification */
3379 	} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3380 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3381 		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3382 	} else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3383 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3384 		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3385 	} else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3386 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3387 		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3388 	} else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3389 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3390 		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3391 	} else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3392 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3393 		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3394 	} else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3395 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3396 		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3397 	} else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3398 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3399 		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3400 	} else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3401 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3402 		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3403 	} else {
3404 		err = EINVAL;
3405 	}
3406 
3407 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3408 	    "<== hxge_set_priv_prop: err %d", err));
3409 
3410 	return (err);
3411 }
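
/*
 * Usage note (assumed administrative path): private properties such
 * as _rxdma_intr_time are normally reached through dladm, e.g.
 *
 *	dladm set-linkprop -p _rxdma_intr_time=<value> hxge0
 *
 * which arrives here via the MAC framework's setprop entry point.
 */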
3412 
3413 static int
3414 hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_flags,
3415     uint_t pr_valsize, void *pr_val)
3416 {
3417 	p_hxge_param_t	param_arr = hxgep->param_arr;
3418 	char		valstr[MAXNAMELEN];
3419 	int		err = 0;
3420 	uint_t		strsize;
3421 	int		value = 0;
3422 
3423 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3424 	    "==> hxge_get_priv_prop: property %s", pr_name));
3425 
3426 	if (pr_flags & MAC_PROP_DEFAULT) {
3427 		/* Receive Interrupt Blanking Parameters */
3428 		if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3429 			value = RXDMA_RCR_TO_DEFAULT;
3430 		} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3431 			value = RXDMA_RCR_PTHRES_DEFAULT;
3432 
3433 		/* Classification and Load Distribution Configuration */
3434 		} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0 ||
3435 		    strcmp(pr_name, "_class_opt_ipv4_udp") == 0 ||
3436 		    strcmp(pr_name, "_class_opt_ipv4_ah") == 0 ||
3437 		    strcmp(pr_name, "_class_opt_ipv4_sctp") == 0 ||
3438 		    strcmp(pr_name, "_class_opt_ipv6_tcp") == 0 ||
3439 		    strcmp(pr_name, "_class_opt_ipv6_udp") == 0 ||
3440 		    strcmp(pr_name, "_class_opt_ipv6_ah") == 0 ||
3441 		    strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3442 			value = HXGE_CLASS_TCAM_LOOKUP;
3443 		} else {
3444 			err = EINVAL;
3445 		}
3446 	} else {
3447 		/* Receive Interrupt Blanking Parameters */
3448 		if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3449 			value = hxgep->intr_timeout;
3450 		} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3451 			value = hxgep->intr_threshold;
3452 
3453 		/* Classification and Load Distribution Configuration */
3454 		} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3455 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3456 			    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3457 
3458 			value = (int)param_arr[param_class_opt_ipv4_tcp].value;
3459 		} else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3460 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3461 			    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3462 
3463 			value = (int)param_arr[param_class_opt_ipv4_udp].value;
3464 		} else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3465 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3466 			    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3467 
3468 			value = (int)param_arr[param_class_opt_ipv4_ah].value;
3469 		} else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3470 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3471 			    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3472 
3473 			value = (int)param_arr[param_class_opt_ipv4_sctp].value;
3474 		} else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3475 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3476 			    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3477 
3478 			value = (int)param_arr[param_class_opt_ipv6_tcp].value;
3479 		} else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3480 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3481 			    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3482 
3483 			value = (int)param_arr[param_class_opt_ipv6_udp].value;
3484 		} else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3485 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3486 			    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3487 
3488 			value = (int)param_arr[param_class_opt_ipv6_ah].value;
3489 		} else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3490 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3491 			    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3492 
3493 			value = (int)param_arr[param_class_opt_ipv6_sctp].value;
3494 		} else {
3495 			err = EINVAL;
3496 		}
3497 	}
3498 
3499 	if (err == 0) {
3500 		(void) snprintf(valstr, sizeof (valstr), "0x%x", value);
3501 
3502 		strsize = (uint_t)strlen(valstr);
3503 		if (pr_valsize <= strsize) {	/* leave room for the NUL */
3504 			err = ENOBUFS;
3505 		} else {
3506 			(void) strlcpy(pr_val, valstr, pr_valsize);
3507 		}
3508 	}
3509 
3510 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3511 	    "<== hxge_get_priv_prop: return %d", err));
3512 
3513 	return (err);
3514 }
3515 /*
3516  * Module loading and removing entry points.
3517  */
3518 DDI_DEFINE_STREAM_OPS(hxge_dev_ops, nulldev, nulldev, hxge_attach, hxge_detach,
3519     nodev, NULL, D_MP, NULL, NULL);
3520 
3521 extern struct mod_ops mod_driverops;
3522 
3523 #define	HXGE_DESC_VER	"HXGE 10Gb Ethernet Driver"
3524 
3525 /*
3526  * Module linkage information for the kernel.
3527  */
3528 static struct modldrv hxge_modldrv = {
3529 	&mod_driverops,
3530 	HXGE_DESC_VER,
3531 	&hxge_dev_ops
3532 };
3533 
3534 static struct modlinkage modlinkage = {
3535 	MODREV_1, (void *) &hxge_modldrv, NULL
3536 };
3537 
3538 int
3539 _init(void)
3540 {
3541 	int status;
3542 
3543 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
3544 	mac_init_ops(&hxge_dev_ops, "hxge");
3545 	status = ddi_soft_state_init(&hxge_list, sizeof (hxge_t), 0);
3546 	if (status != 0) {
3547 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
3548 		    "failed to init device soft state"));
3549 		mac_fini_ops(&hxge_dev_ops);
3550 		goto _init_exit;
3551 	}
3552 
3553 	status = mod_install(&modlinkage);
3554 	if (status != 0) {
3555 		ddi_soft_state_fini(&hxge_list);
3556 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "Mod install failed"));
3557 		goto _init_exit;
3558 	}
3559 
3560 	MUTEX_INIT(&hxge_common_lock, NULL, MUTEX_DRIVER, NULL);
3561 
3562 _init_exit:
3563 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
3564 
3565 	return (status);
3566 }
3567 
3568 int
3569 _fini(void)
3570 {
3571 	int status;
3572 
3573 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
3574 
3575 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
3576 
3577 	if (hxge_mblks_pending)
3578 		return (EBUSY);
3579 
3580 	status = mod_remove(&modlinkage);
3581 	if (status != DDI_SUCCESS) {
3582 		HXGE_DEBUG_MSG((NULL, MOD_CTL,
3583 		    "Module removal failed 0x%08x", status));
3584 		goto _fini_exit;
3585 	}
3586 
3587 	mac_fini_ops(&hxge_dev_ops);
3588 
3589 	ddi_soft_state_fini(&hxge_list);
3590 
3591 	MUTEX_DESTROY(&hxge_common_lock);
3592 
3593 _fini_exit:
3594 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));
3595 
3596 	return (status);
3597 }
3598 
3599 int
3600 _info(struct modinfo *modinfop)
3601 {
3602 	int status;
3603 
3604 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
3605 	status = mod_info(&modlinkage, modinfop);
3606 	HXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
3607 
3608 	return (status);
3609 }
3610 
3611 /*ARGSUSED*/
3612 hxge_status_t
3613 hxge_add_intrs(p_hxge_t hxgep)
3614 {
3615 	int		intr_types;
3616 	int		type = 0;
3617 	int		ddi_status = DDI_SUCCESS;
3618 	hxge_status_t	status = HXGE_OK;
3619 
3620 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs"));
3621 
3622 	hxgep->hxge_intr_type.intr_registered = B_FALSE;
3623 	hxgep->hxge_intr_type.intr_enabled = B_FALSE;
3624 	hxgep->hxge_intr_type.msi_intx_cnt = 0;
3625 	hxgep->hxge_intr_type.intr_added = 0;
3626 	hxgep->hxge_intr_type.niu_msi_enable = B_FALSE;
3627 	hxgep->hxge_intr_type.intr_type = 0;
3628 
3629 	if (hxge_msi_enable) {
3630 		hxgep->hxge_intr_type.niu_msi_enable = B_TRUE;
3631 	}
3632 
3633 	/* Get the supported interrupt types */
3634 	if ((ddi_status = ddi_intr_get_supported_types(hxgep->dip, &intr_types))
3635 	    != DDI_SUCCESS) {
3636 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_intrs: "
3637 		    "ddi_intr_get_supported_types failed: status 0x%08x",
3638 		    ddi_status));
3639 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3640 	}
3641 
3642 	hxgep->hxge_intr_type.intr_types = intr_types;
3643 
3644 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3645 	    "ddi_intr_get_supported_types: 0x%08x", intr_types));
3646 
3647 	/*
3648 	 * Pick the interrupt type to use, based on hxge_msi_enable:
3649 	 *	1 - MSI
3650 	 *	2 - MSI-X
3651 	 *	others - FIXED (INTx emulation)
3652 	 */
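	/*
	 * For example, MSI-X can be requested by adding the following
	 * line to /etc/system (any value other than 1 or 2 selects
	 * FIXED):
	 *
	 *	set hxge:hxge_msi_enable = 2
	 */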
3653 	switch (hxge_msi_enable) {
3654 	default:
3655 		type = DDI_INTR_TYPE_FIXED;
3656 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3657 		    "use fixed (intx emulation) type %08x", type));
3658 		break;
3659 
3660 	case 2:
3661 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3662 		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
3663 		if (intr_types & DDI_INTR_TYPE_MSIX) {
3664 			type = DDI_INTR_TYPE_MSIX;
3665 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3666 			    "==> hxge_add_intrs: "
3667 			    "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3668 		} else if (intr_types & DDI_INTR_TYPE_MSI) {
3669 			type = DDI_INTR_TYPE_MSI;
3670 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3671 			    "==> hxge_add_intrs: "
3672 			    "ddi_intr_get_supported_types: MSI 0x%08x", type));
3673 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
3674 			type = DDI_INTR_TYPE_FIXED;
3675 			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3676 			    "ddi_intr_get_supported_types: FIXED 0x%08x", type));
3677 		}
3678 		break;
3679 
3680 	case 1:
3681 		if (intr_types & DDI_INTR_TYPE_MSI) {
3682 			type = DDI_INTR_TYPE_MSI;
3683 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3684 			    "==> hxge_add_intrs: "
3685 			    "ddi_intr_get_supported_types: MSI 0x%08x", type));
3686 		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
3687 			type = DDI_INTR_TYPE_MSIX;
3688 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3689 			    "==> hxge_add_intrs: "
3690 			    "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3691 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
3692 			type = DDI_INTR_TYPE_FIXED;
3693 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3694 			    "==> hxge_add_intrs: "
3695 			    "ddi_intr_get_supported_types: FIXED 0x%08x", type));
3696 		}
3697 	}
3698 
3699 	hxgep->hxge_intr_type.intr_type = type;
3700 	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
3701 	    type == DDI_INTR_TYPE_FIXED) &&
3702 	    hxgep->hxge_intr_type.niu_msi_enable) {
3703 		if ((status = hxge_add_intrs_adv(hxgep)) != DDI_SUCCESS) {
3704 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3705 			    " hxge_add_intrs: "
3706 			    " hxge_add_intrs_adv failed: status 0x%08x",
3707 			    status));
3708 			return (status);
3709 		} else {
3710 			HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_intrs: "
3711 			    "interrupts registered : type %d", type));
3712 			hxgep->hxge_intr_type.intr_registered = B_TRUE;
3713 
3714 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
3715 			    "\nAdded advanced hxge add_intr_adv "
3716 			    "intr type 0x%x\n", type));
3717 
3718 			return (status);
3719 		}
3720 	}
3721 
3722 	if (!hxgep->hxge_intr_type.intr_registered) {
3723 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3724 		    "==> hxge_add_intrs: failed to register interrupts"));
3725 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3726 	}
3727 
3728 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs"));
3729 
3730 	return (status);
3731 }
3732 
3733 /*ARGSUSED*/
3734 static hxge_status_t
3735 hxge_add_soft_intrs(p_hxge_t hxgep)
3736 {
3737 	int		ddi_status = DDI_SUCCESS;
3738 	hxge_status_t	status = HXGE_OK;
3739 
3740 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_soft_intrs"));
3741 
3742 	hxgep->resched_id = NULL;
3743 	hxgep->resched_running = B_FALSE;
3744 	ddi_status = ddi_add_softintr(hxgep->dip, DDI_SOFTINT_LOW,
3745 	    &hxgep->resched_id, NULL, NULL, hxge_reschedule, (caddr_t)hxgep);
3746 	if (ddi_status != DDI_SUCCESS) {
3747 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_soft_intrs: "
3748 		    "ddi_add_softintr failed: status 0x%08x", ddi_status));
3749 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3750 	}
3751 
3752 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_add_soft_intrs"));
3753 
3754 	return (status);
3755 }
3756 
3757 /*ARGSUSED*/
3758 static hxge_status_t
3759 hxge_add_intrs_adv(p_hxge_t hxgep)
3760 {
3761 	int		intr_type;
3762 	p_hxge_intr_t	intrp;
3763 	hxge_status_t	status;
3764 
3765 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv"));
3766 
3767 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3768 	intr_type = intrp->intr_type;
3769 
3770 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv: type 0x%x",
3771 	    intr_type));
3772 
3773 	switch (intr_type) {
3774 	case DDI_INTR_TYPE_MSI:		/* 0x2 */
3775 	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
3776 		status = hxge_add_intrs_adv_type(hxgep, intr_type);
3777 		break;
3778 
3779 	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
3780 		status = hxge_add_intrs_adv_type_fix(hxgep, intr_type);
3781 		break;
3782 
3783 	default:
3784 		status = HXGE_ERROR;
3785 		break;
3786 	}
3787 
3788 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv"));
3789 
3790 	return (status);
3791 }
3792 
3793 /*ARGSUSED*/
3794 static hxge_status_t
3795 hxge_add_intrs_adv_type(p_hxge_t hxgep, uint32_t int_type)
3796 {
3797 	dev_info_t	*dip = hxgep->dip;
3798 	p_hxge_ldg_t	ldgp;
3799 	p_hxge_intr_t	intrp;
3800 	uint_t		*inthandler;
3801 	void		*arg1, *arg2;
3802 	int		behavior;
3803 	int		nintrs, navail;
3804 	int		nactual, nrequired;
3805 	int		inum = 0;
3806 	int		loop = 0;
3807 	int		x, y;
3808 	int		ddi_status = DDI_SUCCESS;
3809 	hxge_status_t	status = HXGE_OK;
3810 
3811 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type"));
3812 
3813 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3814 
3815 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3816 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3817 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3818 		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
3819 		    "nintrs: %d", ddi_status, nintrs));
3820 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3821 	}
3822 
3823 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
3824 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
3825 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3826 		    "ddi_intr_get_navail() failed, status: 0x%x, "
3827 		    "navail: %d", ddi_status, navail));
3828 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3829 	}
3830 
3831 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3832 	    "ddi_intr_get_navail() returned: intr type %d nintrs %d, navail %d",
3833 	    int_type, nintrs, navail));
3834 
3835 	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
3836 		/* The MSI vector count must be a power of 2; round down */
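		/*
		 * e.g. navail == 12 (binary 1100): the highest bit set
		 * is bit 3, so (navail & 8) == 8 and the request is
		 * rounded down to 8 vectors.
		 */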
3837 		if ((navail & 16) == 16) {
3838 			navail = 16;
3839 		} else if ((navail & 8) == 8) {
3840 			navail = 8;
3841 		} else if ((navail & 4) == 4) {
3842 			navail = 4;
3843 		} else if ((navail & 2) == 2) {
3844 			navail = 2;
3845 		} else {
3846 			navail = 1;
3847 		}
3848 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3849 		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
3850 		    "navail %d", nintrs, navail));
3851 	}
3852 
3853 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3854 	    "requesting: intr type %d nintrs %d, navail %d",
3855 	    int_type, nintrs, navail));
3856 
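	/*
	 * DDI_INTR_ALLOC_STRICT fails unless every requested vector is
	 * granted; DDI_INTR_ALLOC_NORMAL may return fewer, so nactual
	 * is checked after ddi_intr_alloc().
	 */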
3857 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
3858 	    DDI_INTR_ALLOC_NORMAL);
3859 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
3860 	intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP);
3861 
3862 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
3863 	    navail, &nactual, behavior);
3864 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
3865 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3866 		    " ddi_intr_alloc() failed: %d", ddi_status));
3867 		kmem_free(intrp->htable, intrp->intr_size);
3868 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3869 	}
3870 
3871 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3872 	    "ddi_intr_alloc() returned: navail %d nactual %d",
3873 	    navail, nactual));
3874 
3875 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
3876 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
3877 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3878 		    " ddi_intr_get_pri() failed: %d", ddi_status));
3879 		/* Free already allocated interrupts */
3880 		for (y = 0; y < nactual; y++) {
3881 			(void) ddi_intr_free(intrp->htable[y]);
3882 		}
3883 
3884 		kmem_free(intrp->htable, intrp->intr_size);
3885 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3886 	}
3887 
3888 	nrequired = 0;
3889 	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
3890 	if (status != HXGE_OK) {
3891 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3892 		    "hxge_add_intrs_adv_type: hxge_ldgv_init "
3893 		    "failed: 0x%x", status));
3894 		/* Free already allocated interrupts */
3895 		for (y = 0; y < nactual; y++) {
3896 			(void) ddi_intr_free(intrp->htable[y]);
3897 		}
3898 
3899 		kmem_free(intrp->htable, intrp->intr_size);
3900 		return (status);
3901 	}
3902 
3903 	ldgp = hxgep->ldgvp->ldgp;
3904 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3905 	    "After hxge_ldgv_init(): nreq %d nactual %d", nrequired, nactual));
3906 
3907 	if (nactual < nrequired)
3908 		loop = nactual;
3909 	else
3910 		loop = nrequired;
3911 
3912 	for (x = 0; x < loop; x++, ldgp++) {
3913 		ldgp->vector = (uint8_t)x;
3914 		arg1 = ldgp->ldvp;
3915 		arg2 = hxgep;
3916 		if (ldgp->nldvs == 1) {
3917 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
3918 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3919 			    "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
3920 			    "1-1 int handler (entry %d)\n",
3921 			    arg1, arg2, x));
3922 		} else if (ldgp->nldvs > 1) {
3923 			inthandler = (uint_t *)ldgp->sys_intr_handler;
3924 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3925 			    "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
3926 			    "nldvs %d int handler (entry %d)\n",
3927 			    arg1, arg2, ldgp->nldvs, x));
3928 		}
3929 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3930 		    "==> hxge_add_intrs_adv_type: ddi_intr_add_handler #%d "
3931 		    "htable 0x%llx", x, intrp->htable[x]));
3932 
3933 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
3934 		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
3935 		    DDI_SUCCESS) {
3936 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3937 			    "==> hxge_add_intrs_adv_type: failed #%d "
3938 			    "status 0x%x", x, ddi_status));
3939 			for (y = 0; y < intrp->intr_added; y++) {
3940 				(void) ddi_intr_remove_handler(
3941 				    intrp->htable[y]);
3942 			}
3943 
3944 			/* Free already allocated intr */
3945 			for (y = 0; y < nactual; y++) {
3946 				(void) ddi_intr_free(intrp->htable[y]);
3947 			}
3948 			kmem_free(intrp->htable, intrp->intr_size);
3949 
3950 			(void) hxge_ldgv_uninit(hxgep);
3951 
3952 			return (HXGE_ERROR | HXGE_DDI_FAILED);
3953 		}
3954 
3955 		intrp->intr_added++;
3956 	}
3957 	intrp->msi_intx_cnt = nactual;
3958 
3959 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3960 	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
3961 	    navail, nactual, intrp->msi_intx_cnt, intrp->intr_added));
3962 
3963 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
3964 	(void) hxge_intr_ldgv_init(hxgep);
3965 
3966 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type"));
3967 
3968 	return (status);
3969 }
3970 
3971 /*ARGSUSED*/
3972 static hxge_status_t
3973 hxge_add_intrs_adv_type_fix(p_hxge_t hxgep, uint32_t int_type)
3974 {
3975 	dev_info_t	*dip = hxgep->dip;
3976 	p_hxge_ldg_t	ldgp;
3977 	p_hxge_intr_t	intrp;
3978 	uint_t		*inthandler;
3979 	void		*arg1, *arg2;
3980 	int		behavior;
3981 	int		nintrs, navail;
3982 	int		nactual, nrequired;
3983 	int		inum = 0;
3984 	int		x, y;
3985 	int		ddi_status = DDI_SUCCESS;
3986 	hxge_status_t	status = HXGE_OK;
3987 
3988 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type_fix"));
3989 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3990 
3991 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3992 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3993 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3994 		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
3995 		    "nintrs: %d", ddi_status, nintrs));
3996 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3997 	}
3998 
3999 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
4000 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
4001 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4002 		    "ddi_intr_get_navail() failed, status: 0x%x, "
4003 		    "navail: %d", ddi_status, navail));
4004 		return (HXGE_ERROR | HXGE_DDI_FAILED);
4005 	}
4006 
4007 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
4008 	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
4009 	    nintrs, navail));
4010 
4011 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
4012 	    DDI_INTR_ALLOC_NORMAL);
4013 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
4014 	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
4015 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
4016 	    navail, &nactual, behavior);
4017 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
4018 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4019 		    " ddi_intr_alloc() failed: %d", ddi_status));
4020 		kmem_free(intrp->htable, intrp->intr_size);
4021 		return (HXGE_ERROR | HXGE_DDI_FAILED);
4022 	}
4023 
4024 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
4025 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
4026 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4027 		    " ddi_intr_get_pri() failed: %d", ddi_status));
4028 		/* Free already allocated interrupts */
4029 		for (y = 0; y < nactual; y++) {
4030 			(void) ddi_intr_free(intrp->htable[y]);
4031 		}
4032 
4033 		kmem_free(intrp->htable, intrp->intr_size);
4034 		return (HXGE_ERROR | HXGE_DDI_FAILED);
4035 	}
4036 
4037 	nrequired = 0;
4038 	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
4039 	if (status != HXGE_OK) {
4040 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4041 		    "hxge_add_intrs_adv_type_fix:hxge_ldgv_init "
4042 		    "failed: 0x%x", status));
4043 		/* Free already allocated interrupts */
4044 		for (y = 0; y < nactual; y++) {
4045 			(void) ddi_intr_free(intrp->htable[y]);
4046 		}
4047 
4048 		kmem_free(intrp->htable, intrp->intr_size);
4049 		return (status);
4050 	}
4051 
4052 	ldgp = hxgep->ldgvp->ldgp;
4053 	for (x = 0; x < nrequired; x++, ldgp++) {
4054 		ldgp->vector = (uint8_t)x;
4055 		arg1 = ldgp->ldvp;
4056 		arg2 = hxgep;
4057 		if (ldgp->nldvs == 1) {
4058 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
4059 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
4060 			    "hxge_add_intrs_adv_type_fix: "
4061 			    "1-1 int handler(%d) ldg %d ldv %d "
4062 			    "arg1 $%p arg2 $%p\n",
4063 			    x, ldgp->ldg, ldgp->ldvp->ldv, arg1, arg2));
4064 		} else if (ldgp->nldvs > 1) {
4065 			inthandler = (uint_t *)ldgp->sys_intr_handler;
4066 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
4067 			    "hxge_add_intrs_adv_type_fix: "
4068 			    "shared int handler(%d) nldvs %d ldg %d ldv %d "
4069 			    "arg1 0x%016llx arg2 0x%016llx\n",
4070 			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
4071 			    arg1, arg2));
4072 		}
4073 
4074 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
4075 		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
4076 		    DDI_SUCCESS) {
4077 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4078 			    "==> hxge_add_intrs_adv_type_fix: failed #%d "
4079 			    "status 0x%x", x, ddi_status));
4080 			for (y = 0; y < intrp->intr_added; y++) {
4081 				(void) ddi_intr_remove_handler(
4082 				    intrp->htable[y]);
4083 			}
4084 			/* Free already allocated interrupts */
4085 			for (y = 0; y < nactual; y++) {
4086 				(void) ddi_intr_free(intrp->htable[y]);
4087 			}
4088 			kmem_free(intrp->htable, intrp->intr_size);
4089 
4090 			(void) hxge_ldgv_uninit(hxgep);
4091 
4092 			return (HXGE_ERROR | HXGE_DDI_FAILED);
4093 		}
4094 		intrp->intr_added++;
4095 	}
4096 
4097 	intrp->msi_intx_cnt = nactual;
4098 
4099 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
4100 
4101 	status = hxge_intr_ldgv_init(hxgep);
4102 
4103 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type_fix"));
4104 
4105 	return (status);
4106 }
4107 
4108 /*ARGSUSED*/
4109 static void
4110 hxge_remove_intrs(p_hxge_t hxgep)
4111 {
4112 	int		i, inum;
4113 	p_hxge_intr_t	intrp;
4114 
4115 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs"));
4116 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4117 	if (!intrp->intr_registered) {
4118 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
4119 		    "<== hxge_remove_intrs: interrupts not registered"));
4120 		return;
4121 	}
4122 
4123 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs:advanced"));
4124 
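	/* Tear down in reverse order: disable, remove handlers, free. */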
4125 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4126 		(void) ddi_intr_block_disable(intrp->htable,
4127 		    intrp->intr_added);
4128 	} else {
4129 		for (i = 0; i < intrp->intr_added; i++) {
4130 			(void) ddi_intr_disable(intrp->htable[i]);
4131 		}
4132 	}
4133 
4134 	for (inum = 0; inum < intrp->intr_added; inum++) {
4135 		if (intrp->htable[inum]) {
4136 			(void) ddi_intr_remove_handler(intrp->htable[inum]);
4137 		}
4138 	}
4139 
4140 	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
4141 		if (intrp->htable[inum]) {
4142 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
4143 			    "hxge_remove_intrs: ddi_intr_free inum %d "
4144 			    "msi_intx_cnt %d intr_added %d",
4145 			    inum, intrp->msi_intx_cnt, intrp->intr_added));
4146 
4147 			(void) ddi_intr_free(intrp->htable[inum]);
4148 		}
4149 	}
4150 
4151 	kmem_free(intrp->htable, intrp->intr_size);
4152 	intrp->intr_registered = B_FALSE;
4153 	intrp->intr_enabled = B_FALSE;
4154 	intrp->msi_intx_cnt = 0;
4155 	intrp->intr_added = 0;
4156 
4157 	(void) hxge_ldgv_uninit(hxgep);
4158 
4159 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_intrs"));
4160 }
4161 
4162 /*ARGSUSED*/
4163 static void
4164 hxge_remove_soft_intrs(p_hxge_t hxgep)
4165 {
4166 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_soft_intrs"));
4167 
4168 	if (hxgep->resched_id) {
4169 		ddi_remove_softintr(hxgep->resched_id);
4170 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
4171 		    "==> hxge_remove_soft_intrs: removed"));
4172 		hxgep->resched_id = NULL;
4173 	}
4174 
4175 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_soft_intrs"));
4176 }
4177 
4178 /*ARGSUSED*/
4179 void
4180 hxge_intrs_enable(p_hxge_t hxgep)
4181 {
4182 	p_hxge_intr_t	intrp;
4183 	int		i;
4184 	int		status;
4185 
4186 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable"));
4187 
4188 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4189 
4190 	if (!intrp->intr_registered) {
4191 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_intrs_enable: "
4192 		    "interrupts are not registered"));
4193 		return;
4194 	}
4195 
4196 	if (intrp->intr_enabled) {
4197 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
4198 		    "<== hxge_intrs_enable: already enabled"));
4199 		return;
4200 	}
4201 
4202 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4203 		status = ddi_intr_block_enable(intrp->htable,
4204 		    intrp->intr_added);
4205 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
4206 		    "block enable - status 0x%x total inums #%d\n",
4207 		    status, intrp->intr_added));
4208 	} else {
4209 		for (i = 0; i < intrp->intr_added; i++) {
4210 			status = ddi_intr_enable(intrp->htable[i]);
4211 			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
4212 			    "ddi_intr_enable:enable - status 0x%x "
4213 			    "total inums %d enable inum #%d\n",
4214 			    status, intrp->intr_added, i));
4215 			if (status == DDI_SUCCESS) {
4216 				intrp->intr_enabled = B_TRUE;
4217 			}
4218 		}
4219 	}
4220 
4221 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_enable"));
4222 }
4223 
4224 /*ARGSUSED*/
4225 static void
4226 hxge_intrs_disable(p_hxge_t hxgep)
4227 {
4228 	p_hxge_intr_t	intrp;
4229 	int		i;
4230 
4231 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_disable"));
4232 
4233 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4234 
4235 	if (!intrp->intr_registered) {
4236 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable: "
4237 		    "interrupts are not registered"));
4238 		return;
4239 	}
4240 
4241 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4242 		(void) ddi_intr_block_disable(intrp->htable,
4243 		    intrp->intr_added);
4244 	} else {
4245 		for (i = 0; i < intrp->intr_added; i++) {
4246 			(void) ddi_intr_disable(intrp->htable[i]);
4247 		}
4248 	}
4249 
4250 	intrp->intr_enabled = B_FALSE;
4251 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable"));
4252 }
4253 
4254 static hxge_status_t
4255 hxge_mac_register(p_hxge_t hxgep)
4256 {
4257 	mac_register_t	*macp;
4258 	int		status;
4259 
4260 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_mac_register"));
4261 
4262 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
4263 		return (HXGE_ERROR);
4264 
4265 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
4266 	macp->m_driver = hxgep;
4267 	macp->m_dip = hxgep->dip;
4268 	macp->m_src_addr = hxgep->ouraddr.ether_addr_octet;
4269 
4270 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
4271 	    "hxge_mac_register: ether addr is %x:%x:%x:%x:%x:%x",
4272 	    macp->m_src_addr[0],
4273 	    macp->m_src_addr[1],
4274 	    macp->m_src_addr[2],
4275 	    macp->m_src_addr[3],
4276 	    macp->m_src_addr[4],
4277 	    macp->m_src_addr[5]));
4278 
4279 	macp->m_callbacks = &hxge_m_callbacks;
4280 	macp->m_min_sdu = 0;
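	/*
	 * m_max_sdu is the MTU advertised to the stack; subtracting
	 * MTU_TO_FRAME_SIZE (presumably the link-layer framing
	 * overhead) converts the VMAC frame size to an MTU.
	 */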
4281 	macp->m_max_sdu = hxgep->vmac.maxframesize - MTU_TO_FRAME_SIZE;
4282 	macp->m_margin = VLAN_TAGSZ;
4283 	macp->m_priv_props = hxge_priv_props;
4284 	macp->m_priv_prop_count = HXGE_MAX_PRIV_PROPS;
4285 
4286 	status = mac_register(macp, &hxgep->mach);
4287 	mac_free(macp);
4288 
4289 	if (status != 0) {
4290 		cmn_err(CE_WARN,
4291 		    "hxge_mac_register failed (status %d instance %d)",
4292 		    status, hxgep->instance);
4293 		return (HXGE_ERROR);
4294 	}
4295 
4296 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_mac_register success "
4297 	    "(instance %d)", hxgep->instance));
4298 
4299 	return (HXGE_OK);
4300 }
4301 
4302 static int
4303 hxge_init_common_dev(p_hxge_t hxgep)
4304 {
4305 	p_hxge_hw_list_t	hw_p;
4306 	dev_info_t		*p_dip;
4307 
4308 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_init_common_dev"));
4309 
4310 	p_dip = hxgep->p_dip;
4311 	MUTEX_ENTER(&hxge_common_lock);
4312 
4313 	/*
4314 	 * Search the per-Hydra hardware list for this device's parent dip.
4315 	 */
4316 	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
4317 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4318 		    "==> hxge_init_common_dev: hw_p $%p parent dip $%p",
4319 		    hw_p, p_dip));
4320 		if (hw_p->parent_devp == p_dip) {
4321 			hxgep->hxge_hw_p = hw_p;
4322 			hw_p->ndevs++;
4323 			hw_p->hxge_p = hxgep;
4324 			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4325 			    "==> hxge_init_common_dev: "
4326 			    "hw_p $%p parent dip $%p ndevs %d (found)",
4327 			    hw_p, p_dip, hw_p->ndevs));
4328 			break;
4329 		}
4330 	}
4331 
4332 	if (hw_p == NULL) {
4333 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4334 		    "==> hxge_init_common_dev: parent dip $%p (new)", p_dip));
4335 		hw_p = kmem_zalloc(sizeof (hxge_hw_list_t), KM_SLEEP);
4336 		hw_p->parent_devp = p_dip;
4337 		hw_p->magic = HXGE_MAGIC;
4338 		hxgep->hxge_hw_p = hw_p;
4339 		hw_p->ndevs++;
4340 		hw_p->hxge_p = hxgep;
4341 		hw_p->next = hxge_hw_list;
4342 
4343 		MUTEX_INIT(&hw_p->hxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
4344 		MUTEX_INIT(&hw_p->hxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
4345 		MUTEX_INIT(&hw_p->hxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
4346 
4347 		hxge_hw_list = hw_p;
4348 	}
4349 	MUTEX_EXIT(&hxge_common_lock);
4350 	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4351 	    "==> hxge_init_common_dev (hxge_hw_list) $%p", hxge_hw_list));
4352 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_init_common_dev"));
4353 
4354 	return (HXGE_OK);
4355 }
4356 
4357 static void
4358 hxge_uninit_common_dev(p_hxge_t hxgep)
4359 {
4360 	p_hxge_hw_list_t	hw_p, h_hw_p;
4361 	dev_info_t		*p_dip;
4362 
4363 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_uninit_common_dev"));
4364 	if (hxgep->hxge_hw_p == NULL) {
4365 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4366 		    "<== hxge_uninit_common_dev (no common)"));
4367 		return;
4368 	}
4369 
4370 	MUTEX_ENTER(&hxge_common_lock);
4371 	h_hw_p = hxge_hw_list;
4372 	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
4373 		p_dip = hw_p->parent_devp;
4374 		if (hxgep->hxge_hw_p == hw_p && p_dip == hxgep->p_dip &&
4375 		    hxgep->hxge_hw_p->magic == HXGE_MAGIC &&
4376 		    hw_p->magic == HXGE_MAGIC) {
4377 			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4378 			    "==> hxge_uninit_common_dev: "
4379 			    "hw_p $%p parent dip $%p ndevs %d (found)",
4380 			    hw_p, p_dip, hw_p->ndevs));
4381 
4382 			hxgep->hxge_hw_p = NULL;
4383 			if (hw_p->ndevs) {
4384 				hw_p->ndevs--;
4385 			}
4386 			hw_p->hxge_p = NULL;
4387 			if (!hw_p->ndevs) {
4388 				MUTEX_DESTROY(&hw_p->hxge_vlan_lock);
4389 				MUTEX_DESTROY(&hw_p->hxge_tcam_lock);
4390 				MUTEX_DESTROY(&hw_p->hxge_cfg_lock);
4391 				HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4392 				    "==> hxge_uninit_common_dev: "
4393 				    "hw_p $%p parent dip $%p ndevs %d (last)",
4394 				    hw_p, p_dip, hw_p->ndevs));
4395 
4396 				if (hw_p == hxge_hw_list) {
4397 					    "==> hxge_uninit_common_dev: "
4398 					    "==> hxge_uninit_common_dev:"
4399 					    "remove head "
4400 					    "hw_p $%p parent dip $%p "
4401 					    "ndevs %d (head)",
4402 					    hw_p, p_dip, hw_p->ndevs));
4403 					hxge_hw_list = hw_p->next;
4404 				} else {
4405 					    "==> hxge_uninit_common_dev: "
4406 					    "==> hxge_uninit_common_dev:"
4407 					    "remove middle "
4408 					    "hw_p $%p parent dip $%p "
4409 					    "ndevs %d (middle)",
4410 					    hw_p, p_dip, hw_p->ndevs));
4411 					h_hw_p->next = hw_p->next;
4412 				}
4413 
4414 				KMEM_FREE(hw_p, sizeof (hxge_hw_list_t));
4415 			}
4416 			break;
4417 		} else {
4418 			h_hw_p = hw_p;
4419 		}
4420 	}
4421 
4422 	MUTEX_EXIT(&hxge_common_lock);
4423 	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4424 	    "==> hxge_uninit_common_dev (hxge_hw_list) $%p", hxge_hw_list));
4425 
4426 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_uninit_common_dev"));
4427 }
4428 
4429 static void
4430 hxge_link_poll(void *arg)
4431 {
4432 	p_hxge_t		hxgep = (p_hxge_t)arg;
4433 	hpi_handle_t		handle;
4434 	cip_link_stat_t		link_stat;
4435 	hxge_timeout		*to = &hxgep->timeout;
4436 
4437 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
4438 	HXGE_REG_RD32(handle, CIP_LINK_STAT, &link_stat.value);
4439 
4440 	if (to->report_link_status ||
4441 	    (to->link_status != link_stat.bits.xpcs0_link_up)) {
4442 		to->link_status = link_stat.bits.xpcs0_link_up;
4443 		to->report_link_status = B_FALSE;
4444 
4445 		if (link_stat.bits.xpcs0_link_up) {
4446 			hxge_link_update(hxgep, LINK_STATE_UP);
4447 		} else {
4448 			hxge_link_update(hxgep, LINK_STATE_DOWN);
4449 		}
4450 	}
4451 
4452 	/* Restart the link status timer to check the link status */
4453 	MUTEX_ENTER(&to->lock);
4454 	to->id = timeout(hxge_link_poll, arg, to->ticks);
4455 	MUTEX_EXIT(&to->lock);
4456 }
4457 
4458 static void
4459 hxge_link_update(p_hxge_t hxgep, link_state_t state)
4460 {
4461 	p_hxge_stats_t		statsp = (p_hxge_stats_t)hxgep->statsp;
4462 
4463 	mac_link_update(hxgep->mach, state);
4464 	if (state == LINK_STATE_UP) {
4465 		statsp->mac_stats.link_speed = 10000;	/* Mbps */
4466 		statsp->mac_stats.link_duplex = 2;	/* LINK_DUPLEX_FULL */
4467 		statsp->mac_stats.link_up = 1;
4468 	} else {
4469 		statsp->mac_stats.link_speed = 0;
4470 		statsp->mac_stats.link_duplex = 0;
4471 		statsp->mac_stats.link_up = 0;
4472 	}
4473 }
4474