/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <hxge_impl.h>

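/*
 * Loopback properties exported through the LB_GET_INFO ioctl.  Hydra
 * supports only two modes: normal operation and internal 10G MAC
 * loopback (see hxge_loopback_ioctl() below).
 */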
lb_property_t lb_normal = {normal, "normal", hxge_lb_normal};
lb_property_t lb_mac10g = {internal, "mac10g", hxge_lb_mac10g};

uint32_t hxge_lb_dbg = 1;

extern uint32_t hxge_jumbo_mtu;
extern boolean_t hxge_jumbo_enable;

static void hxge_rtrace_ioctl(p_hxge_t, queue_t *, mblk_t *, struct iocblk *);

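/*
 * Reset the device: disable hardware interrupts, reinitialize the
 * link (when resuming from a suspend) and the VMAC, then re-enable
 * interrupts.
 */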
void
hxge_global_reset(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_global_reset"));

	(void) hxge_intr_hw_disable(hxgep);

	if (hxgep->suspended)
		(void) hxge_link_init(hxgep);

	(void) hxge_vmac_init(hxgep);

	(void) hxge_intr_hw_enable(hxgep);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_global_reset"));
}

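/*
 * Establish the initial hardware parameters, in particular the
 * maximum VMAC frame size (standard or jumbo).
 */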
void
hxge_hw_id_init(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_hw_id_init"));

	/*
	 * Set up the required initial hardware parameters, such as
	 * the MAC MTU size.
	 */
	hxgep->vmac.is_jumbo = B_FALSE;
	/* 1518 + 4 + 16 */
	hxgep->vmac.maxframesize = STD_FRAME_SIZE + TX_PKT_HEADER_SIZE;
	if (hxgep->param_arr[param_accept_jumbo].value || hxge_jumbo_enable) {
		hxgep->vmac.maxframesize = (uint16_t)hxge_jumbo_mtu;
		hxgep->vmac.is_jumbo = B_TRUE;
	}
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_hw_id_init: maxframesize %d",
	    hxgep->vmac.maxframesize));
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_hw_id_init"));
}

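/*
 * One-time common hardware initialization.  The first instance to get
 * here resets the PFC block; the COMMON_INIT_DONE flag (protected by
 * hxge_cfg_lock) keeps later instances from repeating the reset.
 */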
void
hxge_hw_init_niu_common(p_hxge_t hxgep)
{
	p_hxge_hw_list_t hw_p;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_hw_init_niu_common"));

	if ((hw_p = hxgep->hxge_hw_p) == NULL) {
		return;
	}

	MUTEX_ENTER(&hw_p->hxge_cfg_lock);
	if (hw_p->flags & COMMON_INIT_DONE) {
		HXGE_DEBUG_MSG((hxgep, MOD_CTL, "hxge_hw_init_niu_common"
		    " already done for dip $%p exiting", hw_p->parent_devp));
		MUTEX_EXIT(&hw_p->hxge_cfg_lock);
		return;
	}

	hw_p->flags = COMMON_INIT_START;
	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
	    "hxge_hw_init_niu_common Started for device id %x",
	    hw_p->parent_devp));

	(void) hxge_pfc_hw_reset(hxgep);
	hw_p->flags = COMMON_INIT_DONE;
	MUTEX_EXIT(&hw_p->hxge_cfg_lock);

	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
	    "hxge_hw_init_niu_common Done for device id %x",
	    hw_p->parent_devp));
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_hw_init_niu_common"));
}

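/*
 * Main interrupt service routine.  For each logical device group that
 * has flag bits set in its LDSV vectors, call the per-device handlers,
 * then rearm the groups that were serviced.  arg1 is the logical
 * device vector, arg2 the hxge instance.
 */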
uint_t
hxge_intr(caddr_t arg1, caddr_t arg2)
{
	p_hxge_ldv_t		ldvp = (p_hxge_ldv_t)arg1;
	p_hxge_t		hxgep = (p_hxge_t)arg2;
	uint_t			serviced = DDI_INTR_UNCLAIMED;
	uint8_t			ldv;
	hpi_handle_t		handle;
	p_hxge_ldgv_t		ldgvp;
	p_hxge_ldg_t		ldgp, t_ldgp;
	p_hxge_ldv_t		t_ldvp;
	uint32_t		vector0 = 0, vector1 = 0;
	int			i, j, nldvs, nintrs = 1;
	hpi_status_t		rs = HPI_SUCCESS;

	/*
	 * The DDI interface may pass NULL as the second argument;
	 * recover the instance pointer from the logical device.
	 */
	if ((arg2 == NULL) || ((void *) ldvp->hxgep != arg2)) {
		hxgep = ldvp->hxgep;
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr"));

	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
		HXGE_ERROR_MSG((hxgep, INT_CTL,
		    "<== hxge_intr: not initialized 0x%x", serviced));
		return (serviced);
	}

	ldgvp = hxgep->ldgvp;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: ldgvp $%p", ldgvp));

	if (ldvp == NULL && ldgvp) {
		t_ldvp = ldvp = ldgvp->ldvp;
	}

	if (ldvp) {
		ldgp = t_ldgp = ldvp->ldgp;
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: "
	    "ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp));

	if (ldgvp == NULL || ldvp == NULL || ldgp == NULL) {
		HXGE_ERROR_MSG((hxgep, INT_CTL, "==> hxge_intr: "
		    "ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp));
		HXGE_ERROR_MSG((hxgep, INT_CTL, "<== hxge_intr: not ready"));
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * Walk all the logical devices in this group to find out which
	 * ones raised the interrupt, and call each one's handler to
	 * process its events.
	 */
	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	t_ldgp = ldgp;
	t_ldvp = ldgp->ldvp;

	nldvs = ldgp->nldvs;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: #ldvs %d #intrs %d",
	    nldvs, ldgvp->ldg_intrs));

	serviced = DDI_INTR_CLAIMED;
	for (i = 0; i < nintrs; i++, t_ldgp++) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr(%d): #ldvs %d "
		    " #intrs %d", i, nldvs, nintrs));

		/* Get this group's flag bits. */
		t_ldgp->interrupted = B_FALSE;
		rs = hpi_ldsv_ldfs_get(handle, t_ldgp->ldg, &vector0, &vector1);
		if (rs != HPI_SUCCESS) {
			continue;
		}

		if (!vector0 && !vector1) {
			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: "
			    "no interrupts on group %d", t_ldgp->ldg));
			continue;
		}

		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: "
		    "vector0 0x%llx vector1 0x%llx", vector0, vector1));

		t_ldgp->interrupted = B_TRUE;
		nldvs = t_ldgp->nldvs;

		for (j = 0; j < nldvs; j++, t_ldvp++) {
			/*
			 * Call the device's handler if its flag bits are on.
			 */
			ldv = t_ldvp->ldv;
			if (LDV_ON(ldv, vector0) || LDV_ON(ldv, vector1)) {
				HXGE_DEBUG_MSG((hxgep, INT_CTL,
				    "==> hxge_intr: calling device %d"
				    " #ldvs %d #intrs %d", j, nldvs, nintrs));
				(void) (t_ldvp->ldv_intr_handler)(
				    (caddr_t)t_ldvp, arg2);
			}
		}
	}

	t_ldgp = ldgp;
	for (i = 0; i < nintrs; i++, t_ldgp++) {
		/* Rearm the interrupts of each group that was serviced. */
		if (t_ldgp->interrupted) {
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_intr: arm group %d", t_ldgp->ldg));
			(void) hpi_intr_ldg_mgmt_set(handle, t_ldgp->ldg,
			    t_ldgp->arm, t_ldgp->ldg_timer);
		}
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr: serviced 0x%x",
	    serviced));
	return (serviced);
}

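/*
 * Decode the PEU interrupt status register, bump the matching kstat
 * counter and log an error message for each error bit that is set.
 */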
hxge_status_t
hxge_peu_handle_sys_errors(p_hxge_t hxgep)
{
	hpi_handle_t		handle;
	p_hxge_peu_sys_stats_t	statsp;
	peu_intr_stat_t		stat;

	handle = hxgep->hpi_handle;
	statsp = (p_hxge_peu_sys_stats_t)&hxgep->statsp->peu_sys_stats;

	HXGE_REG_RD64(handle, PEU_INTR_STAT, &stat.value);

	/*
	 * The PCIe errors are unrecoverable and cannot be cleared.
	 * The only thing we can do here is to mask them off to prevent
	 * continued interrupts.
	 */
	HXGE_REG_WR64(handle, PEU_INTR_MASK, 0xffffffff);

	if (stat.bits.spc_acc_err) {
		statsp->spc_acc_err++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: spc_acc_err"));
	}

	if (stat.bits.tdc_pioacc_err) {
		statsp->tdc_pioacc_err++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: tdc_pioacc_err"));
	}

	if (stat.bits.rdc_pioacc_err) {
		statsp->rdc_pioacc_err++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: rdc_pioacc_err"));
	}

	if (stat.bits.pfc_pioacc_err) {
		statsp->pfc_pioacc_err++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: pfc_pioacc_err"));
	}

	if (stat.bits.vmac_pioacc_err) {
		statsp->vmac_pioacc_err++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: vmac_pioacc_err"));
	}

	if (stat.bits.cpl_hdrq_parerr) {
		statsp->cpl_hdrq_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: cpl_hdrq_parerr"));
	}

	if (stat.bits.cpl_dataq_parerr) {
		statsp->cpl_dataq_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: cpl_dataq_parerr"));
	}

	if (stat.bits.retryram_xdlh_parerr) {
		statsp->retryram_xdlh_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: retryram_xdlh_parerr"));
	}

	if (stat.bits.retrysotram_xdlh_parerr) {
		statsp->retrysotram_xdlh_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: retrysotram_xdlh_parerr"));
	}

	if (stat.bits.p_hdrq_parerr) {
		statsp->p_hdrq_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: p_hdrq_parerr"));
	}

	if (stat.bits.p_dataq_parerr) {
		statsp->p_dataq_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: p_dataq_parerr"));
	}

	if (stat.bits.np_hdrq_parerr) {
		statsp->np_hdrq_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: np_hdrq_parerr"));
	}

	if (stat.bits.np_dataq_parerr) {
		statsp->np_dataq_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: np_dataq_parerr"));
	}

	if (stat.bits.eic_msix_parerr) {
		statsp->eic_msix_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: eic_msix_parerr"));
	}

	if (stat.bits.hcr_parerr) {
		statsp->hcr_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: hcr_parerr"));
	}

	return (HXGE_OK);
}

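/*
 * System error interrupt handler.  Reads the device error status
 * register, dispatches to the TDC, RDC or PEU error handlers, and
 * rearms the group when this error device is interrupt driven.
 */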
/*ARGSUSED*/
uint_t
hxge_syserr_intr(caddr_t arg1, caddr_t arg2)
{
	p_hxge_ldv_t	ldvp = (p_hxge_ldv_t)arg1;
	p_hxge_t	hxgep = (p_hxge_t)arg2;
	p_hxge_ldg_t	ldgp = NULL;
	hpi_handle_t	handle;
	dev_err_stat_t	estat;
	uint_t		serviced = DDI_INTR_UNCLAIMED;

	if ((arg1 == NULL) && (arg2 == NULL)) {
		return (serviced);
	}

	if ((arg2 == NULL) ||
	    ((ldvp != NULL) && ((void *)ldvp->hxgep != arg2))) {
		if (ldvp != NULL) {
			hxgep = ldvp->hxgep;
		}
	}

	HXGE_DEBUG_MSG((hxgep, SYSERR_CTL,
	    "==> hxge_syserr_intr: arg2 $%p arg1 $%p", hxgep, ldvp));

	/*
	 * Get the logical device group if this error device is
	 * interrupt driven rather than polled by timer.
	 */
	if (ldvp != NULL && ldvp->use_timer == B_FALSE) {
		ldgp = ldvp->ldgp;
		if (ldgp == NULL) {
			HXGE_ERROR_MSG((hxgep, SYSERR_CTL,
			    "<== hxge_syserr_intr (no logical group): "
			    "arg2 $%p arg1 $%p", hxgep, ldvp));
			return (DDI_INTR_UNCLAIMED);
		}
	}

	/* This interrupt handler is for system error interrupts.  */
	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	estat.value = 0;
	(void) hpi_fzc_sys_err_stat_get(handle, &estat);
	HXGE_DEBUG_MSG((hxgep, SYSERR_CTL,
	    "==> hxge_syserr_intr: device error 0x%016llx", estat.value));

	if (estat.bits.tdc_err0 || estat.bits.tdc_err1) {
		/* TDMC */
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_syserr_intr: device error - TDMC"));
		(void) hxge_txdma_handle_sys_errors(hxgep);
	} else if (estat.bits.rdc_err0 || estat.bits.rdc_err1) {
		/* RDMC */
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_syserr_intr: device error - RDMC"));
		(void) hxge_rxdma_handle_sys_errors(hxgep);
	} else if (estat.bits.vnm_pio_err1 || estat.bits.peu_err1) {
		/* PCI-E */
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_syserr_intr: device error - PCI-E"));

		/* kstats are updated here */
		(void) hxge_peu_handle_sys_errors(hxgep);

		if (estat.bits.peu_err1)
			HXGE_FM_REPORT_ERROR(hxgep, NULL,
			    HXGE_FM_EREPORT_PEU_ERR);

		if (estat.bits.vnm_pio_err1)
			HXGE_FM_REPORT_ERROR(hxgep, NULL,
			    HXGE_FM_EREPORT_PEU_VNM_PIO_ERR);
	} else if (estat.value != 0) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_syserr_intr: device error - unknown"));
	}

	serviced = DDI_INTR_CLAIMED;

	if ((ldgp != NULL) && (ldvp != NULL) &&
	    (ldgp->nldvs == 1) && !ldvp->use_timer) {
		(void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg,
		    B_TRUE, ldgp->ldg_timer);
	}

	HXGE_DEBUG_MSG((hxgep, SYSERR_CTL, "<== hxge_syserr_intr"));
	return (serviced);
}

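/*
 * Enable hardware interrupts (set the per-device interrupt masks).
 */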
void
hxge_intr_hw_enable(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr_hw_enable"));

	(void) hxge_intr_mask_mgmt_set(hxgep, B_TRUE);

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr_hw_enable"));
}

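/*
 * Disable hardware interrupts (clear the per-device interrupt masks).
 */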
void
hxge_intr_hw_disable(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr_hw_disable"));

	(void) hxge_intr_mask_mgmt_set(hxgep, B_FALSE);

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr_hw_disable"));
}

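/*
 * Record new receive interrupt blanking values: the timeout in ticks
 * and the packet count threshold.
 */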
void
hxge_rx_hw_blank(void *arg, time_t ticks, uint_t count)
{
	p_hxge_t	hxgep = (p_hxge_t)arg;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_rx_hw_blank"));

	/*
	 * Replace the current ticks and counts for later
	 * processing by the receive packet interrupt routines.
	 */
	hxgep->intr_timeout = (uint16_t)ticks;
	hxgep->intr_threshold = (uint16_t)count;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_rx_hw_blank"));
}

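/*
 * Quiesce the hardware: disable the transmit and receive VMAC and
 * stop all transmit and receive DMA channels.
 */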
void
hxge_hw_stop(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_hw_stop"));

	(void) hxge_tx_vmac_disable(hxgep);
	(void) hxge_rx_vmac_disable(hxgep);
	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_hw_stop"));
}

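/*
 * Dispatch the driver-private M_IOCTL commands: 64-bit register
 * peek/poke, TCAM access and register trace retrieval.  Unknown
 * commands are nak'ed with EINVAL.
 */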
void
hxge_hw_ioctl(p_hxge_t hxgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	int cmd;

	HXGE_DEBUG_MSG((hxgep, IOC_CTL, "==> hxge_hw_ioctl"));

	if (hxgep == NULL) {
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	iocp->ioc_error = 0;
	cmd = iocp->ioc_cmd;

	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		return;

	case HXGE_GET64:
		hxge_get64(hxgep, mp->b_cont);
		miocack(wq, mp, sizeof (uint32_t), 0);
		break;

	case HXGE_PUT64:
		hxge_put64(hxgep, mp->b_cont);
		miocack(wq, mp, 0, 0);
		break;

	case HXGE_PUT_TCAM:
		hxge_put_tcam(hxgep, mp->b_cont);
		miocack(wq, mp, 0, 0);
		break;

	case HXGE_GET_TCAM:
		hxge_get_tcam(hxgep, mp->b_cont);
		miocack(wq, mp, 0, 0);
		break;

	case HXGE_RTRACE:
		hxge_rtrace_ioctl(hxgep, wq, mp, iocp);
		break;
	}
}

/*
 * 10G is the only loopback mode for Hydra.
 */
void
hxge_loopback_ioctl(p_hxge_t hxgep, queue_t *wq, mblk_t *mp,
    struct iocblk *iocp)
{
	p_lb_property_t lb_props;
	size_t		size;
	int		i;

	if (mp->b_cont == NULL) {
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	switch (iocp->ioc_cmd) {
	case LB_GET_MODE:
		HXGE_DEBUG_MSG((hxgep, IOC_CTL, "HXGE_GET_LB_MODE command"));
		if (hxgep != NULL) {
			*(lb_info_sz_t *)mp->b_cont->b_rptr =
			    hxgep->statsp->port_stats.lb_mode;
			miocack(wq, mp, sizeof (hxge_lb_t), 0);
		} else
			miocnak(wq, mp, 0, EINVAL);
		break;

	case LB_SET_MODE:
		HXGE_DEBUG_MSG((hxgep, IOC_CTL, "HXGE_SET_LB_MODE command"));
		if (iocp->ioc_count != sizeof (uint32_t)) {
			miocack(wq, mp, 0, 0);
			break;
		}
		if ((hxgep != NULL) && hxge_set_lb(hxgep, wq, mp->b_cont)) {
			miocack(wq, mp, 0, 0);
		} else {
			miocnak(wq, mp, 0, EPROTO);
		}
		break;

	case LB_GET_INFO_SIZE:
		HXGE_DEBUG_MSG((hxgep, IOC_CTL, "LB_GET_INFO_SIZE command"));
		if (hxgep != NULL) {
			size = sizeof (lb_normal) + sizeof (lb_mac10g);

			*(lb_info_sz_t *)mp->b_cont->b_rptr = size;

			HXGE_DEBUG_MSG((hxgep, IOC_CTL,
			    "HXGE_GET_LB_INFO command: size %d", size));
			miocack(wq, mp, sizeof (lb_info_sz_t), 0);
		} else
			miocnak(wq, mp, 0, EINVAL);
		break;

	case LB_GET_INFO:
		HXGE_DEBUG_MSG((hxgep, IOC_CTL, "HXGE_GET_LB_INFO command"));
		if (hxgep != NULL) {
			size = sizeof (lb_normal) + sizeof (lb_mac10g);
			HXGE_DEBUG_MSG((hxgep, IOC_CTL,
			    "HXGE_GET_LB_INFO command: size %d", size));
			if (size == iocp->ioc_count) {
				i = 0;
				lb_props = (p_lb_property_t)mp->b_cont->b_rptr;
				lb_props[i++] = lb_normal;
				lb_props[i++] = lb_mac10g;

				miocack(wq, mp, size, 0);
			} else
				miocnak(wq, mp, 0, EINVAL);
		} else {
			miocnak(wq, mp, 0, EINVAL);
			cmn_err(CE_NOTE,
			    "hxge_loopback_ioctl: invalid command 0x%x",
			    iocp->ioc_cmd);
		}
		break;
	}
}

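/*
 * Validate and apply a new loopback mode.  Only hxge_lb_normal and
 * hxge_lb_mac10g are accepted; a successful change triggers a global
 * reset so the VMAC is reprogrammed for the new mode.
 */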
/*ARGSUSED*/
boolean_t
hxge_set_lb(p_hxge_t hxgep, queue_t *wq, p_mblk_t mp)
{
	boolean_t	status = B_TRUE;
	uint32_t	lb_mode;
	lb_property_t	*lb_info;

	HXGE_DEBUG_MSG((hxgep, IOC_CTL, "==> hxge_set_lb"));
	lb_mode = hxgep->statsp->port_stats.lb_mode;
	if (lb_mode == *(uint32_t *)mp->b_rptr) {
		cmn_err(CE_NOTE,
		    "hxge%d: Loopback mode already set (lb_mode %d).\n",
		    hxgep->instance, lb_mode);
		status = B_FALSE;
		goto hxge_set_lb_exit;
	}

	lb_mode = *(uint32_t *)mp->b_rptr;
	lb_info = NULL;

	/* 10G is the only loopback mode for Hydra. */
	if (lb_mode == lb_normal.value)
		lb_info = &lb_normal;
	else if (lb_mode == lb_mac10g.value)
		lb_info = &lb_mac10g;
	else {
		cmn_err(CE_NOTE,
		    "hxge%d: Loopback mode not supported (mode %d).\n",
		    hxgep->instance, lb_mode);
		status = B_FALSE;
		goto hxge_set_lb_exit;
	}

	if (lb_mode == hxge_lb_normal) {
		if (hxge_lb_dbg) {
			cmn_err(CE_NOTE,
			    "!hxge%d: Returning to normal operation",
			    hxgep->instance);
		}

		hxgep->statsp->port_stats.lb_mode = hxge_lb_normal;
		hxge_global_reset(hxgep);

		goto hxge_set_lb_exit;
	}

	hxgep->statsp->port_stats.lb_mode = lb_mode;

	if (hxge_lb_dbg)
		cmn_err(CE_NOTE, "!hxge%d: Adapter now in %s loopback mode",
		    hxgep->instance, lb_info->key);

	if (lb_info->lb_type == internal) {
		if (hxgep->statsp->port_stats.lb_mode == hxge_lb_mac10g)
			hxgep->statsp->mac_stats.link_speed = 10000;
		else {
			cmn_err(CE_NOTE,
			    "hxge%d: Loopback mode not supported (mode %d).\n",
			    hxgep->instance, lb_mode);
			status = B_FALSE;
			goto hxge_set_lb_exit;
		}
		hxgep->statsp->mac_stats.link_duplex = 2;
		hxgep->statsp->mac_stats.link_up = 1;
	}

	hxge_global_reset(hxgep);

hxge_set_lb_exit:
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "<== hxge_set_lb status = 0x%08x", status));

	return (status);
}

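/*
 * Periodic (timer-driven) hardware health check: look for transmit
 * hangs, verify the register access handle, poll for system errors
 * when the error device is not interrupt driven, then reschedule
 * itself.
 */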
void
hxge_check_hw_state(p_hxge_t hxgep)
{
	p_hxge_ldgv_t		ldgvp;
	p_hxge_ldv_t		t_ldvp;

	HXGE_DEBUG_MSG((hxgep, SYSERR_CTL, "==> hxge_check_hw_state"));

	MUTEX_ENTER(hxgep->genlock);

	hxgep->hxge_timerid = 0;
	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
		goto hxge_check_hw_state_exit;
	}

	hxge_check_tx_hang(hxgep);

	ldgvp = hxgep->ldgvp;
	if (ldgvp == NULL || (ldgvp->ldvp_syserr == NULL)) {
		HXGE_ERROR_MSG((hxgep, SYSERR_CTL, "<== hxge_check_hw_state: "
		    "NULL ldgvp (interrupt not ready)."));
		goto hxge_check_hw_state_exit;
	}

	t_ldvp = ldgvp->ldvp_syserr;
	if (!t_ldvp->use_timer) {
		HXGE_DEBUG_MSG((hxgep, SYSERR_CTL, "<== hxge_check_hw_state: "
		    "ldgvp $%p t_ldvp $%p use_timer flag %d",
		    ldgvp, t_ldvp, t_ldvp->use_timer));
		goto hxge_check_hw_state_exit;
	}

	if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "Bad register acc handle"));
	}

	(void) hxge_syserr_intr((caddr_t)t_ldvp, (caddr_t)hxgep);

	hxgep->hxge_timerid = hxge_start_timer(hxgep, hxge_check_hw_state,
	    HXGE_CHECK_TIMER);

hxge_check_hw_state_exit:
	MUTEX_EXIT(hxgep->genlock);

	HXGE_DEBUG_MSG((hxgep, SYSERR_CTL, "<== hxge_check_hw_state"));
}

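/*
 * HXGE_RTRACE ioctl: copy a block of register trace entries from the
 * global hpi_rtracebuf into the caller's buffer.  The caller passes
 * the starting block in next_idx and the entry count in last_idx of
 * the rtrace_t header; both are overwritten with the current buffer
 * state on return.
 */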
/*ARGSUSED*/
static void
hxge_rtrace_ioctl(p_hxge_t hxgep, queue_t *wq, mblk_t *mp,
    struct iocblk *iocp)
{
	ssize_t		size;
	rtrace_t	*rtp;
	mblk_t		*nmp;
	uint32_t	i, j;
	uint32_t	start_blk;
	uint32_t	base_entry;
	uint32_t	num_entries;

	HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_rtrace_ioctl"));

	/* Fixed size of the rtrace_t snapshot exchanged with the caller. */
	size = 1024;
	if (mp->b_cont == NULL || MBLKL(mp->b_cont) < size) {
		HXGE_DEBUG_MSG((hxgep, STR_CTL,
		    "malformed M_IOCTL MBLKL = %d size = %d",
		    MBLKL(mp->b_cont), size));
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	nmp = mp->b_cont;
	rtp = (rtrace_t *)nmp->b_rptr;
	start_blk = rtp->next_idx;
	num_entries = rtp->last_idx;
	base_entry = start_blk * MAX_RTRACE_IOC_ENTRIES;

	HXGE_DEBUG_MSG((hxgep, STR_CTL, "start_blk = %d\n", start_blk));
	HXGE_DEBUG_MSG((hxgep, STR_CTL, "num_entries = %d\n", num_entries));
	HXGE_DEBUG_MSG((hxgep, STR_CTL, "base_entry = %d\n", base_entry));

	rtp->next_idx = hpi_rtracebuf.next_idx;
	rtp->last_idx = hpi_rtracebuf.last_idx;
	rtp->wrapped = hpi_rtracebuf.wrapped;
	for (i = 0, j = base_entry; i < num_entries; i++, j++) {
		rtp->buf[i].ctl_addr = hpi_rtracebuf.buf[j].ctl_addr;
		rtp->buf[i].val_l32 = hpi_rtracebuf.buf[j].val_l32;
		rtp->buf[i].val_h32 = hpi_rtracebuf.buf[j].val_h32;
	}

	nmp->b_wptr = nmp->b_rptr + size;
	HXGE_DEBUG_MSG((hxgep, STR_CTL, "<== hxge_rtrace_ioctl"));
	miocack(wq, mp, (int)size, 0);
}
778