1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 
25 #include <sys/nxge/nxge_impl.h>
26 
27 /*
28  * Tunable Receive Completion Ring Configuration B parameters.
29  */
30 uint16_t nxge_rx_pkt_thres;	/* 16 bits */
31 uint8_t nxge_rx_pkt_timeout;	/* 6 bits based on DMA clock divider */
32 
33 lb_property_t lb_normal = {normal, "normal", nxge_lb_normal};
34 lb_property_t lb_external10g = {external, "external10g", nxge_lb_ext10g};
35 lb_property_t lb_external1000 = {external, "external1000", nxge_lb_ext1000};
36 lb_property_t lb_external100 = {external, "external100", nxge_lb_ext100};
37 lb_property_t lb_external10 = {external, "external10", nxge_lb_ext10};
38 lb_property_t lb_phy10g = {internal, "phy10g", nxge_lb_phy10g};
39 lb_property_t lb_phy1000 = {internal, "phy1000", nxge_lb_phy1000};
40 lb_property_t lb_phy = {internal, "phy", nxge_lb_phy};
41 lb_property_t lb_serdes10g = {internal, "serdes10g", nxge_lb_serdes10g};
42 lb_property_t lb_serdes1000 = {internal, "serdes", nxge_lb_serdes1000};
43 lb_property_t lb_mac10g = {internal, "mac10g", nxge_lb_mac10g};
44 lb_property_t lb_mac1000 = {internal, "mac1000", nxge_lb_mac1000};
45 lb_property_t lb_mac = {internal, "mac10/100", nxge_lb_mac};
46 
47 uint32_t nxge_lb_dbg = 1;
48 void nxge_get_mii(p_nxge_t nxgep, p_mblk_t mp);
49 void nxge_put_mii(p_nxge_t nxgep, p_mblk_t mp);
50 static nxge_status_t nxge_check_xaui_xfp(p_nxge_t nxgep);
51 
52 extern uint32_t nxge_rx_mode;
53 extern uint32_t nxge_jumbo_mtu;
54 extern uint16_t	nxge_rdc_buf_offset;
55 
56 static void
57 nxge_rtrace_ioctl(p_nxge_t, queue_t *, mblk_t *, struct iocblk *);
58 
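/*
 * nxge_global_reset: soft reset of the per-port state.  Stop the link
 * monitor, disable hardware interrupts, re-initialize the link when the
 * port is suspended or in a PHY/SerDes loopback mode, then restart the
 * link monitor, re-initialize the MAC and re-enable interrupts.
 */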
59 /* ARGSUSED */
60 nxge_status_t
61 nxge_global_reset(p_nxge_t nxgep)
62 {
63 	nxge_status_t	status = NXGE_OK;
64 
65 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_global_reset"));
66 
67 	if ((status = nxge_link_monitor(nxgep, LINK_MONITOR_STOP)) != NXGE_OK)
68 		return (status);
69 	(void) nxge_intr_hw_disable(nxgep);
70 
71 	if ((nxgep->suspended) ||
72 	    ((nxgep->statsp->port_stats.lb_mode ==
73 	    nxge_lb_phy1000) ||
74 	    (nxgep->statsp->port_stats.lb_mode ==
75 	    nxge_lb_phy10g) ||
76 	    (nxgep->statsp->port_stats.lb_mode ==
77 	    nxge_lb_serdes1000) ||
78 	    (nxgep->statsp->port_stats.lb_mode ==
79 	    nxge_lb_serdes10g))) {
80 		if ((status = nxge_link_init(nxgep)) != NXGE_OK)
81 			return (status);
82 	}
83 
84 	if ((status = nxge_link_monitor(nxgep, LINK_MONITOR_START)) != NXGE_OK)
85 		return (status);
86 	if ((status = nxge_mac_init(nxgep)) != NXGE_OK)
87 		return (status);
88 	(void) nxge_intr_hw_enable(nxgep);
89 
90 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_global_reset"));
91 	return (status);
92 }
93 
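/*
 * nxge_hw_id_init: establish the initial (non-jumbo) MAC frame size
 * limits for this port.
 */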
94 /* ARGSUSED */
95 void
96 nxge_hw_id_init(p_nxge_t nxgep)
97 {
98 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_id_init"));
99 
100 	/*
101 	 * Set up required initial hardware parameters, such as the MAC MTU size.
102 	 */
103 	nxgep->mac.is_jumbo = B_FALSE;
104 
105 	/*
106 	 * Set the maxframe size to 1522 (1518 + 4) to account for
107 	 * VLAN tagged packets.
108 	 */
109 	nxgep->mac.minframesize = NXGE_MIN_MAC_FRAMESIZE;	/* 64 */
110 	nxgep->mac.maxframesize = NXGE_MAX_MAC_FRAMESIZE;	/* 1522 */
111 
112 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_id_init: maxframesize %d",
113 	    nxgep->mac.maxframesize));
114 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_id_init"));
115 }
116 
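/*
 * nxge_hw_init_niu_common: one-time initialization of the hardware
 * blocks shared by all functions of the device.  The shared
 * configuration lock and the COMMON_INIT_* flags ensure that only the
 * first function to get here does the work: reset the FFLP and clamp
 * the nxge_rdc_buf_offset tunable to a value this NIU revision supports.
 */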
117 /* ARGSUSED */
118 void
119 nxge_hw_init_niu_common(p_nxge_t nxgep)
120 {
121 	p_nxge_hw_list_t hw_p;
122 
123 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_init_niu_common"));
124 
125 	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
126 		return;
127 	}
128 	MUTEX_ENTER(&hw_p->nxge_cfg_lock);
129 	if (hw_p->flags & COMMON_INIT_DONE) {
130 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
131 		    "nxge_hw_init_niu_common"
132 		    " already done for dip $%p function %d exiting",
133 		    hw_p->parent_devp, nxgep->function_num));
134 		MUTEX_EXIT(&hw_p->nxge_cfg_lock);
135 		return;
136 	}
137 
138 	hw_p->flags = COMMON_INIT_START;
139 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxge_hw_init_niu_common"
140 	    " Started for device id %x with function %d",
141 	    hw_p->parent_devp, nxgep->function_num));
142 
143 	/* per neptune common block init */
144 	(void) nxge_fflp_hw_reset(nxgep);
145 
146 	if (nxgep->niu_hw_type != NIU_HW_TYPE_RF) {
147 		switch (nxge_rdc_buf_offset) {
148 		case SW_OFFSET_NO_OFFSET:
149 		case SW_OFFSET_64:
150 		case SW_OFFSET_128:
151 			break;
152 		default:
153 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
154 			    "nxge_hw_init_niu_common: Unsupported RDC buffer"
155 			    " offset code %d, setting to %d",
156 			    nxge_rdc_buf_offset, SW_OFFSET_NO_OFFSET));
157 			nxge_rdc_buf_offset = SW_OFFSET_NO_OFFSET;
158 			break;
159 		}
160 	} else {
161 		switch (nxge_rdc_buf_offset) {
162 		case SW_OFFSET_NO_OFFSET:
163 		case SW_OFFSET_64:
164 		case SW_OFFSET_128:
165 		case SW_OFFSET_192:
166 		case SW_OFFSET_256:
167 		case SW_OFFSET_320:
168 		case SW_OFFSET_384:
169 		case SW_OFFSET_448:
170 			break;
171 		default:
172 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
173 			    "nxge_hw_init_niu_common: Unsupported RDC buffer"
174 			    " offset code %d, setting to %d",
175 			    nxge_rdc_buf_offset, SW_OFFSET_NO_OFFSET));
176 			nxge_rdc_buf_offset = SW_OFFSET_NO_OFFSET;
177 			break;
178 		}
179 	}
180 
181 	hw_p->flags = COMMON_INIT_DONE;
182 	MUTEX_EXIT(&hw_p->nxge_cfg_lock);
183 
184 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxge_hw_init_niu_common"
185 	    " Done for device id %x with function %d",
186 	    hw_p->parent_devp, nxgep->function_num));
187 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_init_niu_common"));
188 }
189 
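/*
 * nxge_intr: main interrupt service routine.  For each logical device
 * group, read the logical device state vectors, call the registered
 * handler of every logical device whose flag bits are set, then re-arm
 * the group with its programmed timer.
 */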
190 /* ARGSUSED */
191 uint_t
192 nxge_intr(void *arg1, void *arg2)
193 {
194 	p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
195 	p_nxge_t nxgep = (p_nxge_t)arg2;
196 	uint_t serviced = DDI_INTR_UNCLAIMED;
197 	uint8_t ldv;
198 	npi_handle_t handle;
199 	p_nxge_ldgv_t ldgvp;
200 	p_nxge_ldg_t ldgp, t_ldgp;
201 	p_nxge_ldv_t t_ldvp;
202 	uint64_t vector0 = 0, vector1 = 0, vector2 = 0;
203 	int i, j, nldvs, nintrs = 1;
204 	npi_status_t rs = NPI_SUCCESS;
205 
206 	VERIFY(ldvp != NULL);
207 
208 	/* DDI interface returns second arg as NULL (n2 niumx driver) !!! */
209 	if (arg2 == NULL || (void *) ldvp->nxgep != arg2) {
210 		nxgep = ldvp->nxgep;
211 	}
212 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr"));
213 
214 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
215 		NXGE_ERROR_MSG((nxgep, INT_CTL,
216 		    "<== nxge_intr: not initialized 0x%x", serviced));
217 		return (serviced);
218 	}
219 
220 	ldgvp = nxgep->ldgvp;
221 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: ldgvp $%p", ldgvp));
222 	if (ldvp == NULL && ldgvp) {
223 		t_ldvp = ldvp = ldgvp->ldvp;
224 	}
225 	if (ldvp) {
226 		ldgp = t_ldgp = ldvp->ldgp;
227 	}
228 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: "
229 	    "ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp));
230 	if (ldgvp == NULL || ldvp == NULL || ldgp == NULL) {
231 		NXGE_ERROR_MSG((nxgep, INT_CTL, "==> nxge_intr: "
232 		    "ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp));
233 		NXGE_ERROR_MSG((nxgep, INT_CTL, "<== nxge_intr: not ready"));
234 		return (DDI_INTR_UNCLAIMED);
235 	}
236 	/*
237 	 * This interrupt handler has to go through all the logical devices
238 	 * in the group to find out which logical device raised the interrupt
239 	 * and then call its handler to process the events.
240 	 */
241 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
242 	t_ldgp = ldgp;
243 	t_ldvp = ldgp->ldvp;
244 
245 	nldvs = ldgp->nldvs;
246 
247 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: #ldvs %d #intrs %d",
248 	    nldvs, ldgvp->ldg_intrs));
249 
250 	serviced = DDI_INTR_CLAIMED;
251 	for (i = 0; i < nintrs; i++, t_ldgp++) {
252 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr(%d): #ldvs %d "
253 		    " #intrs %d", i, nldvs, nintrs));
254 		/* Get this group's flag bits.  */
255 		rs = npi_ldsv_ldfs_get(handle, t_ldgp->ldg,
256 		    &vector0, &vector1, &vector2);
257 		if (rs) {
258 			continue;
259 		}
260 		if (!vector0 && !vector1 && !vector2) {
261 			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: "
262 			    "no interrupts on group %d", t_ldgp->ldg));
263 			continue;
264 		}
265 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: "
266 		    "vector0 0x%llx vector1 0x%llx vector2 0x%llx",
267 		    vector0, vector1, vector2));
268 		nldvs = t_ldgp->nldvs;
269 		for (j = 0; j < nldvs; j++, t_ldvp++) {
270 			/*
271 			 * Call device's handler if flag bits are on.
272 			 */
273 			ldv = t_ldvp->ldv;
274 			if (((ldv < NXGE_MAC_LD_START) &&
275 			    (LDV_ON(ldv, vector0) |
276 			    (LDV_ON(ldv, vector1)))) ||
277 			    (ldv >= NXGE_MAC_LD_START &&
278 			    ((LDV2_ON_1(ldv, vector2)) ||
279 			    (LDV2_ON_2(ldv, vector2))))) {
280 				(void) (t_ldvp->ldv_intr_handler)(
281 				    (caddr_t)t_ldvp, arg2);
282 				NXGE_DEBUG_MSG((nxgep, INT_CTL,
283 				    "==> nxge_intr: "
284 				    "calling device %d #ldvs %d #intrs %d",
285 				    j, nldvs, nintrs));
286 			}
287 		}
288 	}
289 
290 	t_ldgp = ldgp;
291 	for (i = 0; i < nintrs; i++, t_ldgp++) {
292 		/* rearm group interrupts */
293 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: arm "
294 		    "group %d", t_ldgp->ldg));
295 		(void) npi_intr_ldg_mgmt_set(handle, t_ldgp->ldg,
296 		    t_ldgp->arm, t_ldgp->ldg_timer);
297 	}
298 
299 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr: serviced 0x%x",
300 	    serviced));
301 	return (serviced);
302 }
303 
304 
305 /*
306  * XFP Related Status Register Values Under 3 Different Conditions
307  *
308  * -------------+-------------------------+-------------------------
309  * 		|   Intel XFP and Avago   |	 Picolight XFP
310  * -------------+---------+---------------+---------+---------------
311  *		| STATUS0 | TX_ALARM_STAT | STATUS0 | TX_ALARM_STAT
312  * -------------+---------+---------------+---------+---------------
313  *	No XFP  | 0x639C  |      0x40     | 0x639C  |      0x40
314  * -------------+---------+---------------+---------+---------------
315  * XFP,linkdown | 0x43BC  |      0x40     | 0x639C  |      0x40
316  * -------------+---------+---------------+---------+---------------
317  * XFP,linkup   | 0x03FC  |      0x0      | 0x03FC  |      0x0
318  * -------------+---------+---------------+---------+---------------
319  * Note:
320  *      STATUS0         = BCM8704_USER_ANALOG_STATUS0_REG
321  *      TX_ALARM_STAT   = BCM8704_USER_TX_ALARM_STATUS_REG
322  */
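/*
 * nxge_check_xaui_xfp: read the BCM8704 analog status and TX alarm
 * status registers over MDIO.  A failed read is reported as a XAUI
 * error; in debug builds, an analog status of 0x639C on a 10G fiber
 * port is additionally reported as a bad or absent XFP.
 */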
323 /* ARGSUSED */
324 static nxge_status_t
325 nxge_check_xaui_xfp(p_nxge_t nxgep)
326 {
327 	nxge_status_t	status = NXGE_OK;
328 	uint8_t		phy_port_addr;
329 	uint16_t	val;
330 	uint16_t	val1;
331 	uint8_t		portn;
332 
333 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_check_xaui_xfp"));
334 
335 	portn = nxgep->mac.portnum;
336 	phy_port_addr = nxgep->statsp->mac_stats.xcvr_portn;
337 
338 	/*
339 	 * Keep the val1 code even though it is not used. It could be
340 	 * used to differentiate the "No XFP" case from the "XFP,linkdown"
341 	 * case when an Intel XFP is used.
342 	 */
343 	if ((status = nxge_mdio_read(nxgep, phy_port_addr,
344 	    BCM8704_USER_DEV3_ADDR,
345 	    BCM8704_USER_ANALOG_STATUS0_REG, &val)) == NXGE_OK) {
346 		status = nxge_mdio_read(nxgep, phy_port_addr,
347 		    BCM8704_USER_DEV3_ADDR,
348 		    BCM8704_USER_TX_ALARM_STATUS_REG, &val1);
349 	}
350 
351 	if (status != NXGE_OK) {
352 		NXGE_FM_REPORT_ERROR(nxgep, portn, 0,
353 		    NXGE_FM_EREPORT_XAUI_ERR);
354 		if (DDI_FM_EREPORT_CAP(nxgep->fm_capabilities)) {
355 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
356 			    "XAUI is bad or absent on port<%d>\n", portn));
357 		}
358 #ifdef NXGE_DEBUG
359 	/*
360 	 * As a workaround for CR6693529, do not execute this block of
361 	 * code in the non-debug driver. When a Picolight XFP transceiver
362 	 * is used, register BCM8704_USER_ANALOG_STATUS0_REG returns
363 	 * the same 0x639C value in the normal link-down case, which causes
364 	 * false FMA messages and link reconnection problems.
365 	 */
366 	} else if (nxgep->mac.portmode == PORT_10G_FIBER) {
367 		/*
368 		 * 0x03FC = 0000 0011 1111 1100 (XFP is normal)
369 		 * 0x639C = 0110 0011 1001 1100 (XFP has problem)
370 		 * bit14 = 1: PMD loss-of-light indicator
371 		 * bit13 = 1: PMD Rx loss-of-signal
372 		 * bit6  = 0: Light is NOT ok
373 		 * bit5  = 0: PMD Rx signal is NOT ok
374 		 */
375 		if (val == 0x639C) {
376 			NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
377 			    NXGE_FM_EREPORT_XFP_ERR);
378 			if (DDI_FM_EREPORT_CAP(nxgep->fm_capabilities)) {
379 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
380 				    "XFP is bad or absent on port<%d>\n",
381 				    portn));
382 			}
383 			status = NXGE_ERROR;
384 		}
385 #endif
386 	}
387 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_check_xaui_xfp"));
388 	return (status);
389 }
390 
391 
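/*
 * nxge_syserr_intr: system error handler, invoked either as an
 * interrupt or in polled mode from nxge_check_hw_state.  Read the
 * system error status register, dispatch to the per-block error
 * handlers (IPP, ZCP, RDMC, TXC, FFLP, ...), check the XAUI/XFP on
 * 10G and TN1010 ports, and re-arm the logical device group when
 * running in interrupt mode.
 */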
392 /* ARGSUSED */
393 uint_t
394 nxge_syserr_intr(void *arg1, void *arg2)
395 {
396 	p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
397 	p_nxge_t nxgep = (p_nxge_t)arg2;
398 	p_nxge_ldg_t ldgp = NULL;
399 	npi_handle_t handle;
400 	sys_err_stat_t estat;
401 	uint_t serviced = DDI_INTR_UNCLAIMED;
402 
403 	if (arg1 == NULL && arg2 == NULL) {
404 		return (serviced);
405 	}
406 	if (arg2 == NULL || ((ldvp != NULL && (void *) ldvp->nxgep != arg2))) {
407 		if (ldvp != NULL) {
408 			nxgep = ldvp->nxgep;
409 		}
410 	}
411 	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL,
412 	    "==> nxge_syserr_intr: arg2 $%p arg1 $%p", nxgep, ldvp));
413 	if (ldvp != NULL && ldvp->use_timer == B_FALSE) {
414 		ldgp = ldvp->ldgp;
415 		if (ldgp == NULL) {
416 			NXGE_ERROR_MSG((nxgep, SYSERR_CTL,
417 		    "<== nxge_syserr_intr(no logical group): "
418 			    "arg2 $%p arg1 $%p", nxgep, ldvp));
419 			return (DDI_INTR_UNCLAIMED);
420 		}
421 		/*
422 		 * Get the logical device state if the function uses interrupts.
423 		 */
424 	}
425 
426 	/* This interrupt handler is for system error interrupts.  */
427 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
428 	estat.value = 0;
429 	(void) npi_fzc_sys_err_stat_get(handle, &estat);
430 	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL,
431 	    "==> nxge_syserr_intr: device error 0x%016llx", estat.value));
432 
433 	if (estat.bits.ldw.smx) {
434 		/* SMX */
435 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
436 		    "==> nxge_syserr_intr: device error - SMX"));
437 	} else if (estat.bits.ldw.mac) {
438 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
439 		    "==> nxge_syserr_intr: device error - MAC"));
440 		/*
441 		 * There is nothing to be done here. All MAC errors go to the
442 		 * per-MAC-port interrupt. MIF is the only MAC sub-block that
443 		 * can generate status here; any MIF status reported here is
444 		 * ignored and is checked by the per-port timer instead.
445 		 */
446 	} else if (estat.bits.ldw.ipp) {
447 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
448 		    "==> nxge_syserr_intr: device error - IPP"));
449 		(void) nxge_ipp_handle_sys_errors(nxgep);
450 	} else if (estat.bits.ldw.zcp) {
451 		/* ZCP */
452 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
453 		    "==> nxge_syserr_intr: device error - ZCP"));
454 		(void) nxge_zcp_handle_sys_errors(nxgep);
455 	} else if (estat.bits.ldw.tdmc) {
456 		/* TDMC */
457 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
458 		    "==> nxge_syserr_intr: device error - TDMC"));
459 		/*
460 		 * There are no TDMC system errors defined in the PRM. All TDMC
461 		 * channel-specific errors are reported on a per-channel basis.
462 		 */
463 	} else if (estat.bits.ldw.rdmc) {
464 		/* RDMC */
465 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
466 		    "==> nxge_syserr_intr: device error - RDMC"));
467 		(void) nxge_rxdma_handle_sys_errors(nxgep);
468 	} else if (estat.bits.ldw.txc) {
469 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
470 		    "==> nxge_syserr_intr: device error - TXC"));
471 		(void) nxge_txc_handle_sys_errors(nxgep);
472 	} else if ((nxgep->niu_type != N2_NIU) && estat.bits.ldw.peu) {
473 		/* PCI-E */
474 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
475 		    "==> nxge_syserr_intr: device error - PCI-E"));
476 	} else if (estat.bits.ldw.meta1) {
477 		/* META1 */
478 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
479 		    "==> nxge_syserr_intr: device error - META1"));
480 	} else if (estat.bits.ldw.meta2) {
481 		/* META2 */
482 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
483 		    "==> nxge_syserr_intr: device error - META2"));
484 	} else if (estat.bits.ldw.fflp) {
485 		/* FFLP */
486 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
487 		    "==> nxge_syserr_intr: device error - FFLP"));
488 		(void) nxge_fflp_handle_sys_errors(nxgep);
489 	}
490 
491 	/*
492 	 * nxge_check_xaui_xfp checks XAUI for all of the following
493 	 * portmodes, but checks XFP only if portmode == PORT_10G_FIBER.
494 	 */
495 	if (nxgep->mac.portmode == PORT_10G_FIBER ||
496 	    nxgep->mac.portmode == PORT_10G_COPPER ||
497 	    nxgep->mac.portmode == PORT_10G_TN1010 ||
498 	    nxgep->mac.portmode == PORT_1G_TN1010) {
499 		if (nxge_check_xaui_xfp(nxgep) != NXGE_OK) {
500 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
501 			    "==> nxge_syserr_intr: device error - XAUI"));
502 		}
503 	}
504 
505 	serviced = DDI_INTR_CLAIMED;
506 
507 	if (ldgp != NULL && ldvp != NULL && ldgp->nldvs == 1 &&
508 	    !ldvp->use_timer) {
509 		(void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
510 		    B_TRUE, ldgp->ldg_timer);
511 	}
512 	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "<== nxge_syserr_intr"));
513 	return (serviced);
514 }
515 
516 /* ARGSUSED */
517 void
518 nxge_intr_hw_enable(p_nxge_t nxgep)
519 {
520 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr_hw_enable"));
521 	(void) nxge_intr_mask_mgmt_set(nxgep, B_TRUE);
522 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr_hw_enable"));
523 }
524 
525 /* ARGSUSED */
526 void
527 nxge_intr_hw_disable(p_nxge_t nxgep)
528 {
529 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr_hw_disable"));
530 	(void) nxge_intr_mask_mgmt_set(nxgep, B_FALSE);
531 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr_hw_disable"));
532 }
533 
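/*
 * nxge_rx_hw_blank: apply new receive interrupt blanking parameters by
 * programming the given RCR packet threshold (count) and timeout
 * (ticks) on every receive DMA channel of this instance.
 */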
534 /* ARGSUSED */
535 void
536 nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count)
537 {
538 	p_nxge_t nxgep = (p_nxge_t)arg;
539 	uint8_t channel;
540 	npi_handle_t handle;
541 	p_nxge_ldgv_t ldgvp;
542 	p_nxge_ldv_t ldvp;
543 	int i;
544 
545 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_hw_blank"));
546 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
547 
548 	if ((ldgvp = nxgep->ldgvp) == NULL) {
549 		NXGE_ERROR_MSG((nxgep, INT_CTL,
550 		    "<== nxge_rx_hw_blank (not enabled)"));
551 		return;
552 	}
553 	ldvp = nxgep->ldgvp->ldvp;
554 	if (ldvp == NULL) {
555 		return;
556 	}
557 	for (i = 0; i < ldgvp->nldvs; i++, ldvp++) {
558 		if (ldvp->is_rxdma) {
559 			channel = ldvp->channel;
560 			(void) npi_rxdma_cfg_rdc_rcr_threshold(handle,
561 			    channel, count);
562 			(void) npi_rxdma_cfg_rdc_rcr_timeout(handle,
563 			    channel, ticks);
564 		}
565 	}
566 
567 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_rx_hw_blank"));
568 }
569 
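/*
 * nxge_hw_stop: quiesce the port by disabling the transmit and receive
 * MACs and stopping all transmit and receive DMA channels.
 */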
570 /* ARGSUSED */
571 void
572 nxge_hw_stop(p_nxge_t nxgep)
573 {
574 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_stop"));
575 
576 	(void) nxge_tx_mac_disable(nxgep);
577 	(void) nxge_rx_mac_disable(nxgep);
578 	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
579 	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
580 
581 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_stop"));
582 }
583 
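/*
 * nxge_hw_ioctl: dispatch the driver-private hardware ioctls (MII and
 * 64-bit register peek/poke, TCAM access, TX/RX/interrupt register
 * dumps, and the register trace buffer).
 */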
584 /* ARGSUSED */
585 void
586 nxge_hw_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
587 {
588 	int cmd;
589 
590 	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_hw_ioctl"));
591 
592 	if (nxgep == NULL) {
593 		miocnak(wq, mp, 0, EINVAL);
594 		return;
595 	}
596 	iocp->ioc_error = 0;
597 	cmd = iocp->ioc_cmd;
598 
599 	switch (cmd) {
600 	default:
601 		miocnak(wq, mp, 0, EINVAL);
602 		return;
603 
604 	case NXGE_GET_MII:
605 		nxge_get_mii(nxgep, mp->b_cont);
606 		miocack(wq, mp, sizeof (uint16_t), 0);
607 		break;
608 
609 	case NXGE_PUT_MII:
610 		nxge_put_mii(nxgep, mp->b_cont);
611 		miocack(wq, mp, 0, 0);
612 		break;
613 
614 	case NXGE_GET64:
615 		nxge_get64(nxgep, mp->b_cont);
616 		miocack(wq, mp, sizeof (uint32_t), 0);
617 		break;
618 
619 	case NXGE_PUT64:
620 		nxge_put64(nxgep, mp->b_cont);
621 		miocack(wq, mp, 0, 0);
622 		break;
623 
624 	case NXGE_PUT_TCAM:
625 		nxge_put_tcam(nxgep, mp->b_cont);
626 		miocack(wq, mp, 0, 0);
627 		break;
628 
629 	case NXGE_GET_TCAM:
630 		nxge_get_tcam(nxgep, mp->b_cont);
631 		miocack(wq, mp, 0, 0);
632 		break;
633 
634 	case NXGE_TX_REGS_DUMP:
635 		nxge_txdma_regs_dump_channels(nxgep);
636 		miocack(wq, mp, 0, 0);
637 		break;
638 	case NXGE_RX_REGS_DUMP:
639 		nxge_rxdma_regs_dump_channels(nxgep);
640 		miocack(wq, mp, 0, 0);
641 		break;
642 	case NXGE_VIR_INT_REGS_DUMP:
643 	case NXGE_INT_REGS_DUMP:
644 		nxge_virint_regs_dump(nxgep);
645 		miocack(wq, mp, 0, 0);
646 		break;
647 	case NXGE_RTRACE:
648 		nxge_rtrace_ioctl(nxgep, wq, mp, iocp);
649 		break;
650 	}
651 }
652 
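/*
 * nxge_loopback_ioctl: handle the generic loopback ioctls.  LB_GET_MODE
 * and LB_SET_MODE read and change the current loopback mode;
 * LB_GET_INFO_SIZE and LB_GET_INFO compute and return the list of
 * loopback modes supported by the current port mode and capabilities.
 * The list built for LB_GET_INFO must match the size computed for
 * LB_GET_INFO_SIZE.
 */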
653 /* ARGSUSED */
654 void
655 nxge_loopback_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp,
656 	struct iocblk *iocp)
657 {
658 	p_lb_property_t lb_props;
659 
660 	size_t size;
661 	int i;
662 
663 	if (mp->b_cont == NULL) {
664 		miocnak(wq, mp, 0, EINVAL);
		return;
665 	}
666 	switch (iocp->ioc_cmd) {
667 	case LB_GET_MODE:
668 		NXGE_DEBUG_MSG((nxgep, IOC_CTL, "NXGE_GET_LB_MODE command"));
669 		if (nxgep != NULL) {
670 			*(lb_info_sz_t *)mp->b_cont->b_rptr =
671 			    nxgep->statsp->port_stats.lb_mode;
672 			miocack(wq, mp, sizeof (nxge_lb_t), 0);
673 		} else {
674 			miocnak(wq, mp, 0, EINVAL);
675 		}
676 		break;
677 	case LB_SET_MODE:
678 		NXGE_DEBUG_MSG((nxgep, IOC_CTL, "NXGE_SET_LB_MODE command"));
679 		if (iocp->ioc_count != sizeof (uint32_t)) {
680 			miocack(wq, mp, 0, 0);
681 			break;
682 		}
683 		if ((nxgep != NULL) && nxge_set_lb(nxgep, wq, mp->b_cont)) {
684 			miocack(wq, mp, 0, 0);
685 		} else {
686 			miocnak(wq, mp, 0, EPROTO);
687 		}
688 		break;
689 	case LB_GET_INFO_SIZE:
690 		NXGE_DEBUG_MSG((nxgep, IOC_CTL, "LB_GET_INFO_SIZE command"));
691 		if (nxgep != NULL) {
692 			size = sizeof (lb_normal);
693 			if (nxgep->statsp->mac_stats.cap_10gfdx) {
694 				/* TN1010 does not support external loopback */
695 				if (nxgep->mac.portmode != PORT_1G_TN1010 &&
696 				    nxgep->mac.portmode != PORT_10G_TN1010) {
697 					size += sizeof (lb_external10g);
698 				}
699 				size += sizeof (lb_mac10g);
700 				/* Publish PHY loopback if PHY is present */
701 				if (nxgep->mac.portmode == PORT_10G_COPPER ||
702 				    nxgep->mac.portmode == PORT_10G_TN1010 ||
703 				    nxgep->mac.portmode == PORT_10G_FIBER)
704 					size += sizeof (lb_phy10g);
705 			}
706 
707 			/*
708 			 * Even if cap_10gfdx is false, we still do 10G
709 			 * serdes loopback as a part of SunVTS xnetlbtest
710 			 * internal loopback test.
711 			 */
712 			if (nxgep->mac.portmode == PORT_10G_FIBER ||
713 			    nxgep->mac.portmode == PORT_10G_COPPER ||
714 			    nxgep->mac.portmode == PORT_10G_TN1010 ||
715 			    nxgep->mac.portmode == PORT_10G_SERDES)
716 				size += sizeof (lb_serdes10g);
717 
718 			if (nxgep->statsp->mac_stats.cap_1000fdx) {
719 				/* TN1010 does not support external loopback */
720 				if (nxgep->mac.portmode != PORT_1G_TN1010 &&
721 				    nxgep->mac.portmode != PORT_10G_TN1010) {
722 					size += sizeof (lb_external1000);
723 				}
724 				size += sizeof (lb_mac1000);
725 				if (nxgep->mac.portmode == PORT_1G_COPPER ||
726 				    nxgep->mac.portmode == PORT_1G_TN1010 ||
727 				    nxgep->mac.portmode ==
728 				    PORT_1G_RGMII_FIBER)
729 					size += sizeof (lb_phy1000);
730 			}
731 			if (nxgep->statsp->mac_stats.cap_100fdx)
732 				size += sizeof (lb_external100);
733 			if (nxgep->statsp->mac_stats.cap_10fdx)
734 				size += sizeof (lb_external10);
735 			if (nxgep->mac.portmode == PORT_1G_FIBER ||
736 			    nxgep->mac.portmode == PORT_1G_TN1010 ||
737 			    nxgep->mac.portmode == PORT_1G_SERDES)
738 				size += sizeof (lb_serdes1000);
739 
740 			*(lb_info_sz_t *)mp->b_cont->b_rptr = size;
741 
742 			NXGE_DEBUG_MSG((nxgep, IOC_CTL,
743 			    "NXGE_GET_LB_INFO command: size %d", size));
744 			miocack(wq, mp, sizeof (lb_info_sz_t), 0);
745 		} else
746 			miocnak(wq, mp, 0, EINVAL);
747 		break;
748 
749 	case LB_GET_INFO:
750 		NXGE_DEBUG_MSG((nxgep, IOC_CTL, "NXGE_GET_LB_INFO command"));
751 		if (nxgep != NULL) {
752 			size = sizeof (lb_normal);
753 			if (nxgep->statsp->mac_stats.cap_10gfdx) {
754 				/* TN1010 does not support external loopback */
755 				if (nxgep->mac.portmode != PORT_1G_TN1010 &&
756 				    nxgep->mac.portmode != PORT_10G_TN1010) {
757 					size += sizeof (lb_external10g);
758 				}
759 				size += sizeof (lb_mac10g);
760 				/* Publish PHY loopback if PHY is present */
761 				if (nxgep->mac.portmode == PORT_10G_COPPER ||
762 				    nxgep->mac.portmode == PORT_10G_TN1010 ||
763 				    nxgep->mac.portmode == PORT_10G_FIBER)
764 					size += sizeof (lb_phy10g);
765 			}
766 			if (nxgep->mac.portmode == PORT_10G_FIBER ||
767 			    nxgep->mac.portmode == PORT_10G_COPPER ||
768 			    nxgep->mac.portmode == PORT_10G_TN1010 ||
769 			    nxgep->mac.portmode == PORT_10G_SERDES)
770 				size += sizeof (lb_serdes10g);
771 
772 			if (nxgep->statsp->mac_stats.cap_1000fdx) {
773 				/* TN1010 does not support external loopback */
774 				if (nxgep->mac.portmode != PORT_1G_TN1010 &&
775 				    nxgep->mac.portmode != PORT_10G_TN1010) {
776 					size += sizeof (lb_external1000);
777 				}
778 				size += sizeof (lb_mac1000);
779 				if (nxgep->mac.portmode == PORT_1G_COPPER ||
780 				    nxgep->mac.portmode == PORT_1G_TN1010 ||
781 				    nxgep->mac.portmode ==
782 				    PORT_1G_RGMII_FIBER)
783 					size += sizeof (lb_phy1000);
784 			}
785 			if (nxgep->statsp->mac_stats.cap_100fdx)
786 				size += sizeof (lb_external100);
787 
788 			if (nxgep->statsp->mac_stats.cap_10fdx)
789 				size += sizeof (lb_external10);
790 
791 			if (nxgep->mac.portmode == PORT_1G_FIBER ||
792 			    nxgep->mac.portmode == PORT_1G_TN1010 ||
793 			    nxgep->mac.portmode == PORT_1G_SERDES)
794 				size += sizeof (lb_serdes1000);
795 
796 			NXGE_DEBUG_MSG((nxgep, IOC_CTL,
797 			    "NXGE_GET_LB_INFO command: size %d", size));
798 			if (size == iocp->ioc_count) {
799 				i = 0;
800 				lb_props = (p_lb_property_t)mp->b_cont->b_rptr;
801 				lb_props[i++] = lb_normal;
802 
803 				if (nxgep->statsp->mac_stats.cap_10gfdx) {
804 					lb_props[i++] = lb_mac10g;
805 					if (nxgep->mac.portmode ==
806 					    PORT_10G_COPPER ||
807 					    nxgep->mac.portmode ==
808 					    PORT_10G_TN1010 ||
809 					    nxgep->mac.portmode ==
810 					    PORT_10G_FIBER) {
811 						lb_props[i++] = lb_phy10g;
812 					}
813 					/* TN1010 does not support ext lb */
814 					if (nxgep->mac.portmode !=
815 					    PORT_10G_TN1010 &&
816 					    nxgep->mac.portmode !=
817 					    PORT_1G_TN1010) {
818 						lb_props[i++] = lb_external10g;
819 					}
820 				}
821 
822 				if (nxgep->mac.portmode == PORT_10G_FIBER ||
823 				    nxgep->mac.portmode == PORT_10G_COPPER ||
824 				    nxgep->mac.portmode == PORT_10G_TN1010 ||
825 				    nxgep->mac.portmode == PORT_10G_SERDES)
826 					lb_props[i++] = lb_serdes10g;
827 
828 				if (nxgep->statsp->mac_stats.cap_1000fdx) {
829 					/* TN1010 does not support ext lb */
830 					if (nxgep->mac.portmode !=
831 					    PORT_10G_TN1010 &&
832 					    nxgep->mac.portmode !=
833 					    PORT_1G_TN1010) {
834 						lb_props[i++] = lb_external1000;
835 					}
836 				}
837 
838 				if (nxgep->statsp->mac_stats.cap_100fdx)
839 					lb_props[i++] = lb_external100;
840 
841 				if (nxgep->statsp->mac_stats.cap_10fdx)
842 					lb_props[i++] = lb_external10;
843 
844 				if (nxgep->statsp->mac_stats.cap_1000fdx)
845 					lb_props[i++] = lb_mac1000;
846 
847 				if (nxgep->mac.portmode == PORT_1G_COPPER ||
848 				    nxgep->mac.portmode == PORT_1G_TN1010 ||
849 				    nxgep->mac.portmode ==
850 				    PORT_1G_RGMII_FIBER) {
851 					if (nxgep->statsp->mac_stats.
852 					    cap_1000fdx)
853 						lb_props[i++] = lb_phy1000;
854 				} else if (nxgep->mac.portmode ==
855 				    PORT_1G_FIBER ||
856 				    nxgep->mac.portmode == PORT_1G_TN1010 ||
857 				    nxgep->mac.portmode == PORT_1G_SERDES) {
858 					lb_props[i++] = lb_serdes1000;
859 				}
860 				miocack(wq, mp, size, 0);
861 			} else
862 				miocnak(wq, mp, 0, EINVAL);
863 		} else {
864 			miocnak(wq, mp, 0, EINVAL);
865 			cmn_err(CE_NOTE, "!nxge_hw_ioctl: invalid command 0x%x",
866 			    iocp->ioc_cmd);
867 		}
868 		break;
869 	}
870 }
871 
872 /*
873  * DMA channel interfaces to access various channel specific
874  * hardware functions.
875  */
876 /* ARGSUSED */
877 void
878 nxge_rxdma_channel_put64(nxge_os_acc_handle_t handle, void *reg_addrp,
879 	uint32_t reg_base, uint16_t channel, uint64_t reg_data)
880 {
881 	uint64_t reg_offset;
882 
883 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_channel_put64"));
884 
885 	/*
886 	 * Channel is assumed to range from 0 to the maximum DMA channel #. If
887 	 * we use the virtual DMA CSR address space from the config space (in
888 	 * the PCI case), then the following code needs to use a different
889 	 * offset computation macro.
890 	 */
891 	reg_offset = reg_base + DMC_OFFSET(channel);
892 	NXGE_PIO_WRITE64(handle, reg_addrp, reg_offset, reg_data);
893 
894 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_channel_put64"));
895 }
896 
897 /* ARGSUSED */
898 uint64_t
899 nxge_rxdma_channel_get64(nxge_os_acc_handle_t handle, void *reg_addrp,
900 	uint32_t reg_base, uint16_t channel)
901 {
902 	uint64_t reg_offset;
903 
904 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_channel_get64"));
905 
906 	/*
907 	 * Channel is assumed to range from 0 to the maximum DMA channel #. If
908 	 * we use the virtual DMA CSR address space from the config space (in
909 	 * the PCI case), then the following code needs to use a different
910 	 * offset computation macro.
911 	 */
912 	reg_offset = reg_base + DMC_OFFSET(channel);
913 
914 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_channel_get64"));
915 
916 	return (NXGE_PIO_READ64(handle, reg_addrp, reg_offset));
917 }
918 
919 /* ARGSUSED */
920 void
921 nxge_get32(p_nxge_t nxgep, p_mblk_t mp)
922 {
923 	nxge_os_acc_handle_t nxge_regh;
924 
925 	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_get32"));
926 	nxge_regh = nxgep->dev_regs->nxge_regh;
927 
928 	*(uint32_t *)mp->b_rptr = NXGE_PIO_READ32(nxge_regh,
929 	    nxgep->dev_regs->nxge_regp, *(uint32_t *)mp->b_rptr);
930 
931 	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "value = 0x%08X",
932 	    *(uint32_t *)mp->b_rptr));
933 	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_get32"));
934 }
935 
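/*
 * nxge_put32: register poke.  The mblk carries a register offset in
 * buf[0] and a 32-bit value in buf[1] to write to that offset.
 */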
936 /* ARGSUSED */
937 void
938 nxge_put32(p_nxge_t nxgep, p_mblk_t mp)
939 {
940 	nxge_os_acc_handle_t nxge_regh;
941 	uint32_t *buf;
942 	uint8_t *reg;
943 
944 	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_put32"));
945 	nxge_regh = nxgep->dev_regs->nxge_regh;
946 
947 	buf = (uint32_t *)mp->b_rptr;
948 	reg = (uint8_t *)(nxgep->dev_regs->nxge_regp) + buf[0];
949 	NXGE_DEBUG_MSG((nxgep, IOC_CTL,
950 	    "reg = 0x%016llX index = 0x%08X value = 0x%08X",
951 	    reg, buf[0], buf[1]));
952 	NXGE_PIO_WRITE32(nxge_regh, (uint32_t *)reg, 0, buf[1]);
953 	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_put32"));
954 }
955 
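/*
 * nxge_set_lb: validate and apply a loopback mode requested through
 * LB_SET_MODE.  The mode is accepted only if the current port mode and
 * link capabilities support it.  Autonegotiation parameters are forced
 * to match the selected speed, the link is re-initialized for external
 * and PHY modes, the link state is forced up for internal modes, and
 * the port is reset with nxge_global_reset().  Returns B_TRUE on
 * success and B_FALSE otherwise.
 */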
956 /*ARGSUSED*/
957 boolean_t
958 nxge_set_lb(p_nxge_t nxgep, queue_t *wq, p_mblk_t mp)
959 {
960 	boolean_t status = B_TRUE;
961 	uint32_t lb_mode;
962 	lb_property_t *lb_info;
963 
964 	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_set_lb"));
965 	lb_mode = nxgep->statsp->port_stats.lb_mode;
966 	if (lb_mode == *(uint32_t *)mp->b_rptr) {
967 		cmn_err(CE_NOTE,
968 		    "!nxge%d: Loopback mode already set (lb_mode %d).\n",
969 		    nxgep->instance, lb_mode);
970 		status = B_FALSE;
971 		goto nxge_set_lb_exit;
972 	}
973 	lb_mode = *(uint32_t *)mp->b_rptr;
974 	lb_info = NULL;
975 	if (lb_mode == lb_normal.value)
976 		lb_info = &lb_normal;
977 	else if ((lb_mode == lb_external10g.value) &&
978 	    (nxgep->statsp->mac_stats.cap_10gfdx))
979 		lb_info = &lb_external10g;
980 	else if ((lb_mode == lb_external1000.value) &&
981 	    (nxgep->statsp->mac_stats.cap_1000fdx))
982 		lb_info = &lb_external1000;
983 	else if ((lb_mode == lb_external100.value) &&
984 	    (nxgep->statsp->mac_stats.cap_100fdx))
985 		lb_info = &lb_external100;
986 	else if ((lb_mode == lb_external10.value) &&
987 	    (nxgep->statsp->mac_stats.cap_10fdx))
988 		lb_info = &lb_external10;
989 	else if ((lb_mode == lb_phy10g.value) &&
990 	    (nxgep->mac.portmode == PORT_10G_COPPER ||
991 	    nxgep->mac.portmode == PORT_10G_TN1010 ||
992 	    nxgep->mac.portmode == PORT_10G_FIBER))
993 		lb_info = &lb_phy10g;
994 	else if ((lb_mode == lb_phy1000.value) &&
995 	    (nxgep->mac.portmode == PORT_1G_COPPER ||
996 	    nxgep->mac.portmode == PORT_1G_TN1010 ||
997 	    nxgep->mac.portmode == PORT_1G_RGMII_FIBER))
998 		lb_info = &lb_phy1000;
999 	else if ((lb_mode == lb_phy.value) &&
1000 	    (nxgep->mac.portmode == PORT_1G_COPPER))
1001 		lb_info = &lb_phy;
1002 	else if ((lb_mode == lb_serdes10g.value) &&
1003 	    (nxgep->mac.portmode == PORT_10G_FIBER ||
1004 	    nxgep->mac.portmode == PORT_10G_COPPER ||
1005 	    nxgep->mac.portmode == PORT_10G_TN1010 ||
1006 	    nxgep->mac.portmode == PORT_10G_SERDES))
1007 		lb_info = &lb_serdes10g;
1008 	else if ((lb_mode == lb_serdes1000.value) &&
1009 	    (nxgep->mac.portmode == PORT_1G_FIBER ||
1010 	    nxgep->mac.portmode == PORT_1G_TN1010 ||
1011 	    nxgep->mac.portmode == PORT_1G_SERDES))
1012 		lb_info = &lb_serdes1000;
1013 	else if (lb_mode == lb_mac10g.value)
1014 		lb_info = &lb_mac10g;
1015 	else if (lb_mode == lb_mac1000.value)
1016 		lb_info = &lb_mac1000;
1017 	else if (lb_mode == lb_mac.value)
1018 		lb_info = &lb_mac;
1019 	else {
1020 		cmn_err(CE_NOTE,
1021 		    "!nxge%d: Loopback mode not supported (mode %d).\n",
1022 		    nxgep->instance, lb_mode);
1023 		status = B_FALSE;
1024 		goto nxge_set_lb_exit;
1025 	}
1026 
1027 	if (lb_mode == nxge_lb_normal) {
1028 		if (nxge_lb_dbg) {
1029 			cmn_err(CE_NOTE,
1030 			    "!nxge%d: Returning to normal operation",
1031 			    nxgep->instance);
1032 		}
1033 		if (nxge_set_lb_normal(nxgep) != NXGE_OK) {
1034 			status = B_FALSE;
1035 			cmn_err(CE_NOTE,
1036 			    "!nxge%d: Failed to return to normal operation",
1037 			    nxgep->instance);
1038 		}
1039 		goto nxge_set_lb_exit;
1040 	}
1041 	nxgep->statsp->port_stats.lb_mode = lb_mode;
1042 
1043 	if (nxge_lb_dbg)
1044 		cmn_err(CE_NOTE,
1045 		    "!nxge%d: Adapter now in %s loopback mode",
1046 		    nxgep->instance, lb_info->key);
1047 	nxgep->param_arr[param_autoneg].value = 0;
1048 	nxgep->param_arr[param_anar_10gfdx].value =
1049 	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10g) ||
1050 	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_mac10g) ||
1051 	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy10g) ||
1052 	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_serdes10g);
1053 	nxgep->param_arr[param_anar_10ghdx].value = 0;
1054 	nxgep->param_arr[param_anar_1000fdx].value =
1055 	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext1000) ||
1056 	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_mac1000) ||
1057 	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy1000) ||
1058 	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_serdes1000);
1059 	nxgep->param_arr[param_anar_1000hdx].value = 0;
1060 	nxgep->param_arr[param_anar_100fdx].value =
1061 	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy) ||
1062 	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_mac) ||
1063 	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext100);
1064 	nxgep->param_arr[param_anar_100hdx].value = 0;
1065 	nxgep->param_arr[param_anar_10fdx].value =
1066 	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_mac) ||
1067 	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10);
1068 	if (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext1000) {
1069 		nxgep->param_arr[param_master_cfg_enable].value = 1;
1070 		nxgep->param_arr[param_master_cfg_value].value = 1;
1071 	}
1072 	if ((nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10g) ||
1073 	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext1000) ||
1074 	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext100) ||
1075 	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10) ||
1076 	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy10g) ||
1077 	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy1000) ||
1078 	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy)) {
1079 
1080 		if (nxge_link_monitor(nxgep, LINK_MONITOR_STOP) != NXGE_OK)
1081 			goto nxge_set_lb_err;
1082 		if (nxge_xcvr_find(nxgep) != NXGE_OK)
1083 			goto nxge_set_lb_err;
1084 		if (nxge_link_init(nxgep) != NXGE_OK)
1085 			goto nxge_set_lb_err;
1086 		if (nxge_link_monitor(nxgep, LINK_MONITOR_START) != NXGE_OK)
1087 			goto nxge_set_lb_err;
1088 	}
1089 	if (lb_info->lb_type == internal) {
1090 		if ((nxgep->statsp->port_stats.lb_mode == nxge_lb_mac10g) ||
1091 		    (nxgep->statsp->port_stats.lb_mode ==
1092 		    nxge_lb_phy10g) ||
1093 		    (nxgep->statsp->port_stats.lb_mode ==
1094 		    nxge_lb_serdes10g)) {
1095 			nxgep->statsp->mac_stats.link_speed = 10000;
1096 		} else if ((nxgep->statsp->port_stats.lb_mode
1097 		    == nxge_lb_mac1000) ||
1098 		    (nxgep->statsp->port_stats.lb_mode ==
1099 		    nxge_lb_phy1000) ||
1100 		    (nxgep->statsp->port_stats.lb_mode ==
1101 		    nxge_lb_serdes1000)) {
1102 			nxgep->statsp->mac_stats.link_speed = 1000;
1103 		} else {
1104 			nxgep->statsp->mac_stats.link_speed = 100;
1105 		}
1106 		nxgep->statsp->mac_stats.link_duplex = 2;
1107 		nxgep->statsp->mac_stats.link_up = 1;
1108 	}
1109 	if (nxge_global_reset(nxgep) != NXGE_OK)
1110 		goto nxge_set_lb_err;
1111 
1112 nxge_set_lb_exit:
1113 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1114 	    "<== nxge_set_lb status = 0x%08x", status));
1115 	return (status);
1116 nxge_set_lb_err:
1117 	status = B_FALSE;
1118 	cmn_err(CE_NOTE,
1119 	    "!nxge%d: Failed to put adapter in %s loopback mode",
1120 	    nxgep->instance, lb_info->key);
1121 	return (status);
1122 }
1123 
1124 /* Return to normal (no loopback) mode */
1125 /* ARGSUSED */
1126 nxge_status_t
1127 nxge_set_lb_normal(p_nxge_t nxgep)
1128 {
1129 	nxge_status_t	status = NXGE_OK;
1130 
1131 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_lb_normal"));
1132 
1133 	nxgep->statsp->port_stats.lb_mode = nxge_lb_normal;
1134 	nxgep->param_arr[param_autoneg].value =
1135 	    nxgep->param_arr[param_autoneg].old_value;
1136 	nxgep->param_arr[param_anar_1000fdx].value =
1137 	    nxgep->param_arr[param_anar_1000fdx].old_value;
1138 	nxgep->param_arr[param_anar_1000hdx].value =
1139 	    nxgep->param_arr[param_anar_1000hdx].old_value;
1140 	nxgep->param_arr[param_anar_100fdx].value =
1141 	    nxgep->param_arr[param_anar_100fdx].old_value;
1142 	nxgep->param_arr[param_anar_100hdx].value =
1143 	    nxgep->param_arr[param_anar_100hdx].old_value;
1144 	nxgep->param_arr[param_anar_10fdx].value =
1145 	    nxgep->param_arr[param_anar_10fdx].old_value;
1146 	nxgep->param_arr[param_master_cfg_enable].value =
1147 	    nxgep->param_arr[param_master_cfg_enable].old_value;
1148 	nxgep->param_arr[param_master_cfg_value].value =
1149 	    nxgep->param_arr[param_master_cfg_value].old_value;
1150 
1151 	if ((status = nxge_global_reset(nxgep)) != NXGE_OK)
1152 		return (status);
1153 
1154 	if ((status = nxge_link_monitor(nxgep, LINK_MONITOR_STOP)) != NXGE_OK)
1155 		return (status);
1156 	if ((status = nxge_xcvr_find(nxgep)) != NXGE_OK)
1157 		return (status);
1158 	if ((status = nxge_link_init(nxgep)) != NXGE_OK)
1159 		return (status);
1160 	status = nxge_link_monitor(nxgep, LINK_MONITOR_START);
1161 
1162 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_lb_normal"));
1163 
1164 	return (status);
1165 }
1166 
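/*
 * nxge_get_mii: read the MII register whose number is passed in the
 * mblk and return the 16-bit value in place.
 */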
1167 /* ARGSUSED */
1168 void
1169 nxge_get_mii(p_nxge_t nxgep, p_mblk_t mp)
1170 {
1171 	uint16_t reg;
1172 
1173 	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_get_mii"));
1174 
1175 	reg = *(uint16_t *)mp->b_rptr;
1176 	(void) nxge_mii_read(nxgep, nxgep->statsp->mac_stats.xcvr_portn, reg,
1177 	    (uint16_t *)mp->b_rptr);
1178 	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "reg = 0x%08X value = 0x%04X",
1179 	    reg, *(uint16_t *)mp->b_rptr));
1180 	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_get_mii"));
1181 }
1182 
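/*
 * nxge_put_mii: write the 16-bit value in buf[1] to the MII register
 * selected by buf[0].
 */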
1183 /* ARGSUSED */
1184 void
1185 nxge_put_mii(p_nxge_t nxgep, p_mblk_t mp)
1186 {
1187 	uint16_t *buf;
1188 	uint8_t reg;
1189 
1190 	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_put_mii"));
1191 	buf = (uint16_t *)mp->b_rptr;
1192 	reg = (uint8_t)buf[0];
1193 	NXGE_DEBUG_MSG((nxgep, IOC_CTL,
1194 	    "reg = 0x%08X index = 0x%08X value = 0x%08X",
1195 	    reg, buf[0], buf[1]));
1196 	(void) nxge_mii_write(nxgep, nxgep->statsp->mac_stats.xcvr_portn,
1197 	    reg, buf[1]);
1198 	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_put_mii"));
1199 }
1200 
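/*
 * nxge_check_hw_state: periodic timer callback.  Check for transmit
 * hangs, validate the register access handle with FMA, run the system
 * error handler in polled mode when the system error logical device
 * uses the timer, and reschedule the timer.
 */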
1201 /* ARGSUSED */
1202 void
1203 nxge_check_hw_state(p_nxge_t nxgep)
1204 {
1205 	p_nxge_ldgv_t ldgvp;
1206 	p_nxge_ldv_t t_ldvp;
1207 
1208 	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "==> nxge_check_hw_state"));
1209 
1210 	MUTEX_ENTER(nxgep->genlock);
1211 	nxgep->nxge_timerid = 0;
1212 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1213 		goto nxge_check_hw_state_exit;
1214 	}
1215 	nxge_check_tx_hang(nxgep);
1216 
1217 	ldgvp = nxgep->ldgvp;
1218 	if (ldgvp == NULL || (ldgvp->ldvp_syserr == NULL)) {
1219 		NXGE_ERROR_MSG((nxgep, SYSERR_CTL, "<== nxge_check_hw_state: "
1220 		    "NULL ldgvp (interrupt not ready)."));
1221 		goto nxge_check_hw_state_exit;
1222 	}
1223 	t_ldvp = ldgvp->ldvp_syserr;
1224 	if (!t_ldvp->use_timer) {
1225 		NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "<== nxge_check_hw_state: "
1226 		    "ldgvp $%p t_ldvp $%p use_timer flag %d",
1227 		    ldgvp, t_ldvp, t_ldvp->use_timer));
1228 		goto nxge_check_hw_state_exit;
1229 	}
1230 	if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
1231 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1232 		    "port%d Bad register acc handle", nxgep->mac.portnum));
1233 	}
1234 	(void) nxge_syserr_intr((void *) t_ldvp, (void *) nxgep);
1235 
1236 	nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state,
1237 	    NXGE_CHECK_TIMER);
1238 
1239 nxge_check_hw_state_exit:
1240 	MUTEX_EXIT(nxgep->genlock);
1241 	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "<== nxge_check_hw_state"));
1242 }
1243 
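/*
 * nxge_rtrace_ioctl: copy a block of register trace entries from the
 * global npi_rtracebuf into the caller's buffer.  The request selects
 * the starting block and the number of entries; the current trace
 * indices are returned along with the entries.
 */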
1244 /*ARGSUSED*/
1245 static void
1246 nxge_rtrace_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp,
1247 	struct iocblk *iocp)
1248 {
1249 	ssize_t size;
1250 	rtrace_t *rtp;
1251 	mblk_t *nmp;
1252 	uint32_t i, j;
1253 	uint32_t start_blk;
1254 	uint32_t base_entry;
1255 	uint32_t num_entries;
1256 
1257 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_rtrace_ioctl"));
1258 
1259 	size = 1024;
1260 	if (mp->b_cont == NULL || MBLKL(mp->b_cont) < size) {
1261 		NXGE_DEBUG_MSG((nxgep, STR_CTL,
1262 		    "malformed M_IOCTL MBLKL = %d size = %d",
1263 		    MBLKL(mp->b_cont), size));
1264 		miocnak(wq, mp, 0, EINVAL);
1265 		return;
1266 	}
1267 	nmp = mp->b_cont;
1268 	rtp = (rtrace_t *)nmp->b_rptr;
1269 	start_blk = rtp->next_idx;
1270 	num_entries = rtp->last_idx;
1271 	base_entry = start_blk * MAX_RTRACE_IOC_ENTRIES;
1272 
1273 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "start_blk = %d\n", start_blk));
1274 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "num_entries = %d\n", num_entries));
1275 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "base_entry = %d\n", base_entry));
1276 
1277 	rtp->next_idx = npi_rtracebuf.next_idx;
1278 	rtp->last_idx = npi_rtracebuf.last_idx;
1279 	rtp->wrapped = npi_rtracebuf.wrapped;
1280 	for (i = 0, j = base_entry; i < num_entries; i++, j++) {
1281 		rtp->buf[i].ctl_addr = npi_rtracebuf.buf[j].ctl_addr;
1282 		rtp->buf[i].val_l32 = npi_rtracebuf.buf[j].val_l32;
1283 		rtp->buf[i].val_h32 = npi_rtracebuf.buf[j].val_h32;
1284 	}
1285 
1286 	nmp->b_wptr = nmp->b_rptr + size;
1287 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_rtrace_ioctl"));
1288 	miocack(wq, mp, (int)size, 0);
1289 }
1290