xref: /titanic_51/usr/src/uts/common/io/nxge/nxge_hw.c (revision 15d9d0b528387242011cdcc6190c9e598cfe3a07)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/nxge/nxge_impl.h>
29 
30 /*
31  * Tunable Receive Completion Ring Configuration B parameters.
32  */
33 uint16_t nxge_rx_pkt_thres;	/* 16 bits */
34 uint8_t nxge_rx_pkt_timeout;	/* 6 bits based on DMA clock divider */
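/*
 * Illustrative only (not part of the original file): tunables like these
 * are typically overridden from /etc/system, e.g.
 *
 *	set nxge:nxge_rx_pkt_thres = 0x200
 *	set nxge:nxge_rx_pkt_timeout = 0x8
 *
 * The values above are placeholders; the hardware limits them to 16 bits
 * and 6 bits respectively, as noted in the declarations.
 */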
35 
36 lb_property_t lb_normal = {normal, "normal", nxge_lb_normal};
37 lb_property_t lb_external10g = {external, "external10g", nxge_lb_ext10g};
38 lb_property_t lb_external1000 = {external, "external1000", nxge_lb_ext1000};
39 lb_property_t lb_external100 = {external, "external100", nxge_lb_ext100};
40 lb_property_t lb_external10 = {external, "external10", nxge_lb_ext10};
41 lb_property_t lb_phy10g = {internal, "phy10g", nxge_lb_phy10g};
42 lb_property_t lb_phy1000 = {internal, "phy1000", nxge_lb_phy1000};
43 lb_property_t lb_phy = {internal, "phy", nxge_lb_phy};
44 lb_property_t lb_serdes10g = {internal, "serdes10g", nxge_lb_serdes10g};
45 lb_property_t lb_serdes1000 = {internal, "serdes", nxge_lb_serdes1000};
46 lb_property_t lb_mac10g = {internal, "mac10g", nxge_lb_mac10g};
47 lb_property_t lb_mac1000 = {internal, "mac1000", nxge_lb_mac1000};
48 lb_property_t lb_mac = {internal, "mac10/100", nxge_lb_mac};
49 
50 uint32_t nxge_lb_dbg = 1;
51 void nxge_get_mii(p_nxge_t nxgep, p_mblk_t mp);
52 void nxge_put_mii(p_nxge_t nxgep, p_mblk_t mp);
53 static nxge_status_t nxge_check_xaui_xfp(p_nxge_t nxgep);
54 
55 extern uint32_t nxge_rx_mode;
56 extern uint32_t nxge_jumbo_mtu;
57 extern boolean_t nxge_jumbo_enable;
58 
59 static void
60 nxge_rtrace_ioctl(p_nxge_t, queue_t *, mblk_t *, struct iocblk *);
61 
62 /* ARGSUSED */
63 void
64 nxge_global_reset(p_nxge_t nxgep)
65 {
66 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_global_reset"));
67 
68 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
69 	(void) nxge_intr_hw_disable(nxgep);
70 
71 	if ((nxgep->suspended) ||
72 			((nxgep->statsp->port_stats.lb_mode ==
73 			nxge_lb_phy1000) ||
74 			(nxgep->statsp->port_stats.lb_mode ==
75 			nxge_lb_phy10g) ||
76 			(nxgep->statsp->port_stats.lb_mode ==
77 			nxge_lb_serdes1000) ||
78 			(nxgep->statsp->port_stats.lb_mode ==
79 			nxge_lb_serdes10g))) {
80 		(void) nxge_link_init(nxgep);
81 	}
82 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
83 	(void) nxge_mac_init(nxgep);
84 	(void) nxge_intr_hw_enable(nxgep);
85 
86 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_global_reset"));
87 }
88 
89 /* ARGSUSED */
90 void
91 nxge_hw_id_init(p_nxge_t nxgep)
92 {
93 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_id_init"));
94 	/*
95 	 * Set up the initial hardware parameters, such as the MAC MTU size.
96 	 */
97 	nxgep->mac.is_jumbo = B_FALSE;
98 	nxgep->mac.maxframesize = NXGE_MTU_DEFAULT_MAX;	/* 1522 */
99 	if (nxgep->param_arr[param_accept_jumbo].value || nxge_jumbo_enable) {
100 		nxgep->mac.maxframesize = (uint16_t)nxge_jumbo_mtu;
101 		nxgep->mac.is_jumbo = B_TRUE;
102 	}
103 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
104 		"==> nxge_hw_id_init: maxframesize %d",
105 		nxgep->mac.maxframesize));
106 
107 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_id_init"));
108 }
109 
110 /* ARGSUSED */
111 void
112 nxge_hw_init_niu_common(p_nxge_t nxgep)
113 {
114 	p_nxge_hw_list_t hw_p;
115 
116 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_init_niu_common"));
117 
118 	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
119 		return;
120 	}
121 	MUTEX_ENTER(&hw_p->nxge_cfg_lock);
122 	if (hw_p->flags & COMMON_INIT_DONE) {
123 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
124 			"nxge_hw_init_niu_common"
125 			" already done for dip $%p function %d exiting",
126 			hw_p->parent_devp, nxgep->function_num));
127 		MUTEX_EXIT(&hw_p->nxge_cfg_lock);
128 		return;
129 	}
130 
131 	hw_p->flags = COMMON_INIT_START;
132 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxge_hw_init_niu_common"
133 		" Started for device id %x with function %d",
134 		hw_p->parent_devp, nxgep->function_num));
135 
136 	/* per neptune common block init */
137 	(void) nxge_fflp_hw_reset(nxgep);
138 
139 	hw_p->flags = COMMON_INIT_DONE;
140 	MUTEX_EXIT(&hw_p->nxge_cfg_lock);
141 
142 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxge_hw_init_niu_common"
143 		" Done for device id %x with function %d",
144 		hw_p->parent_devp, nxgep->function_num));
145 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_init_niu_common"));
146 }
147 
148 /* ARGSUSED */
149 uint_t
150 nxge_intr(void *arg1, void *arg2)
151 {
152 	p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
153 	p_nxge_t nxgep = (p_nxge_t)arg2;
154 	uint_t serviced = DDI_INTR_UNCLAIMED;
155 	uint8_t ldv;
156 	npi_handle_t handle;
157 	p_nxge_ldgv_t ldgvp;
158 	p_nxge_ldg_t ldgp, t_ldgp;
159 	p_nxge_ldv_t t_ldvp;
160 	uint64_t vector0 = 0, vector1 = 0, vector2 = 0;
161 	int i, j, nldvs, nintrs = 1;
162 	npi_status_t rs = NPI_SUCCESS;
163 
164 	/* DDI interface returns second arg as NULL (n2 niumx driver) !!! */
165 	if (arg2 == NULL || (void *) ldvp->nxgep != arg2) {
166 		nxgep = ldvp->nxgep;
167 	}
168 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr"));
169 
170 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
171 		NXGE_ERROR_MSG((nxgep, INT_CTL,
172 			"<== nxge_intr: not initialized 0x%x", serviced));
173 		return (serviced);
174 	}
175 
176 	ldgvp = nxgep->ldgvp;
177 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: ldgvp $%p", ldgvp));
178 	if (ldvp == NULL && ldgvp) {
179 		t_ldvp = ldvp = ldgvp->ldvp;
180 	}
181 	if (ldvp) {
182 		ldgp = t_ldgp = ldvp->ldgp;
183 	}
184 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: "
185 		"ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp));
186 	if (ldgvp == NULL || ldvp == NULL || ldgp == NULL) {
187 		NXGE_ERROR_MSG((nxgep, INT_CTL, "==> nxge_intr: "
188 			"ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp));
189 		NXGE_ERROR_MSG((nxgep, INT_CTL, "<== nxge_intr: not ready"));
190 		return (DDI_INTR_UNCLAIMED);
191 	}
192 	/*
193 	 * This interrupt handler has to walk all the logical devices in the
194 	 * group to find out which of them interrupted us, and then call each
195 	 * device's handler to process the events.
196 	 */
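	/*
	 * Dispatch sketch (illustrative only): for each group that posted
	 * work, every member logical device whose flag bit is set in the
	 * LDSV words read below has its registered handler invoked, roughly
	 *
	 *	if (LDV_ON(t_ldvp->ldv, vector0))
	 *		(t_ldvp->ldv_intr_handler)((caddr_t)t_ldvp, arg2);
	 *
	 * The real test below also consults vector1 and, for MAC logical
	 * devices (ldv >= NXGE_MAC_LD_START), vector2.
	 */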
197 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
198 	t_ldgp = ldgp;
199 	t_ldvp = ldgp->ldvp;
200 
201 	nldvs = ldgp->nldvs;
202 
203 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: #ldvs %d #intrs %d",
204 			nldvs, ldgvp->ldg_intrs));
205 
206 	serviced = DDI_INTR_CLAIMED;
207 	for (i = 0; i < nintrs; i++, t_ldgp++) {
208 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr(%d): #ldvs %d "
209 				" #intrs %d", i, nldvs, nintrs));
210 		/* Get this group's flag bits.  */
211 		t_ldgp->interrupted = B_FALSE;
212 		rs = npi_ldsv_ldfs_get(handle, t_ldgp->ldg,
213 			&vector0, &vector1, &vector2);
214 		if (rs) {
215 			continue;
216 		}
217 		if (!vector0 && !vector1 && !vector2) {
218 			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: "
219 				"no interrupts on group %d", t_ldgp->ldg));
220 			continue;
221 		}
222 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: "
223 			"vector0 0x%llx vector1 0x%llx vector2 0x%llx",
224 			vector0, vector1, vector2));
225 		t_ldgp->interrupted = B_TRUE;
226 		nldvs = t_ldgp->nldvs;
227 		for (j = 0; j < nldvs; j++, t_ldvp++) {
228 			/*
229 			 * Call device's handler if flag bits are on.
230 			 */
231 			ldv = t_ldvp->ldv;
232 			if (((ldv < NXGE_MAC_LD_START) &&
233 					(LDV_ON(ldv, vector0) |
234 					(LDV_ON(ldv, vector1)))) ||
235 					(ldv >= NXGE_MAC_LD_START &&
236 					((LDV2_ON_1(ldv, vector2)) ||
237 					(LDV2_ON_2(ldv, vector2))))) {
238 				(void) (t_ldvp->ldv_intr_handler)(
239 					(caddr_t)t_ldvp, arg2);
240 				NXGE_DEBUG_MSG((nxgep, INT_CTL,
241 					"==> nxge_intr: "
242 					"calling device %d #ldvs %d #intrs %d",
243 					j, nldvs, nintrs));
244 			}
245 		}
246 	}
247 
248 	t_ldgp = ldgp;
249 	for (i = 0; i < nintrs; i++, t_ldgp++) {
250 		/* rearm group interrupts */
251 		if (t_ldgp->interrupted) {
252 			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: arm "
253 				"group %d", t_ldgp->ldg));
254 			(void) npi_intr_ldg_mgmt_set(handle, t_ldgp->ldg,
255 				t_ldgp->arm, t_ldgp->ldg_timer);
256 		}
257 	}
258 
259 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr: serviced 0x%x",
260 		serviced));
261 	return (serviced);
262 }
263 
264 /* ARGSUSED */
265 static nxge_status_t
266 nxge_check_xaui_xfp(p_nxge_t nxgep)
267 {
268 	nxge_status_t	status = NXGE_OK;
269 	uint8_t		phy_port_addr;
270 	uint16_t	val;
271 	uint16_t	val1;
272 	uint8_t		portn;
273 
274 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_check_xaui_xfp"));
275 
276 	portn = nxgep->mac.portnum;
277 	phy_port_addr = nxgep->statsp->mac_stats.xcvr_portn;
278 
279 	if ((status = nxge_mdio_read(nxgep, phy_port_addr,
280 	    BCM8704_USER_DEV3_ADDR,
281 	    BCM8704_USER_ANALOG_STATUS0_REG, &val)) == NXGE_OK) {
282 		status = nxge_mdio_read(nxgep, phy_port_addr,
283 		    BCM8704_USER_DEV3_ADDR,
284 		    BCM8704_USER_TX_ALARM_STATUS_REG, &val1);
285 	}
286 	if (status != NXGE_OK) {
287 		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
288 		    NXGE_FM_EREPORT_XAUI_ERR);
289 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
290 		    "XAUI is bad or absent on port<%d>\n", portn));
291 	} else if (nxgep->mac.portmode == PORT_10G_FIBER) {
292 		/*
293 		 * 0x03FC = 0000 0011 1111 1100 (XFP is normal)
294 		 * 0x639C = 0110 0011 1001 1100 (XFP has problem)
295 		 * bit14 = 1: PMD loss-of-light indicator
296 		 * bit13 = 1: PMD Rx loss-of-signal
297 		 * bit6  = 0: Light is NOT ok
298 		 * bit5  = 0: PMD Rx signal is NOT ok
299 		 */
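		/*
		 * Illustrative bit test (sketch only; the code below simply
		 * compares the whole status word against 0x639C):
		 *
		 *	if ((val & (1 << 14)) || (val & (1 << 13)) ||
		 *	    !(val & (1 << 6)) || !(val & (1 << 5)))
		 *		-> optics reporting loss of light/signal
		 */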
300 		if (val == 0x639C) {
301 			NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
302 			    NXGE_FM_EREPORT_XFP_ERR);
303 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
304 			    "XFP is bad or absent on port<%d>\n", portn));
305 			status = NXGE_ERROR;
306 		}
307 	}
308 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_check_xaui_xfp"));
309 	return (status);
310 }
311 
312 
313 /* ARGSUSED */
314 uint_t
315 nxge_syserr_intr(void *arg1, void *arg2)
316 {
317 	p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
318 	p_nxge_t nxgep = (p_nxge_t)arg2;
319 	p_nxge_ldg_t ldgp = NULL;
320 	npi_handle_t handle;
321 	sys_err_stat_t estat;
322 	uint_t serviced = DDI_INTR_UNCLAIMED;
323 
324 	if (arg1 == NULL && arg2 == NULL) {
325 		return (serviced);
326 	}
327 	if (arg2 == NULL || ((ldvp != NULL && (void *) ldvp->nxgep != arg2))) {
328 		if (ldvp != NULL) {
329 			nxgep = ldvp->nxgep;
330 		}
331 	}
332 	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL,
333 		"==> nxge_syserr_intr: arg2 $%p arg1 $%p", nxgep, ldvp));
334 	if (ldvp != NULL && ldvp->use_timer == B_FALSE) {
335 		ldgp = ldvp->ldgp;
336 		if (ldgp == NULL) {
337 			NXGE_ERROR_MSG((nxgep, SYSERR_CTL,
338 				"<== nxge_syserrintr(no logical group): "
339 				"arg2 $%p arg1 $%p", nxgep, ldvp));
340 			return (DDI_INTR_UNCLAIMED);
341 		}
342 		/*
343 		 * Get the logical device state if the function uses interrupt.
344 		 */
345 	}
346 
347 	/* This interrupt handler is for system error interrupts.  */
348 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
349 	estat.value = 0;
350 	(void) npi_fzc_sys_err_stat_get(handle, &estat);
351 	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL,
352 		"==> nxge_syserr_intr: device error 0x%016llx", estat.value));
353 
354 	if (estat.bits.ldw.smx) {
355 		/* SMX */
356 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
357 			"==> nxge_syserr_intr: device error - SMX"));
358 	} else if (estat.bits.ldw.mac) {
359 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
360 			"==> nxge_syserr_intr: device error - MAC"));
361 		/*
362 		 * There is nothing to be done here; all MAC errors are reported
363 		 * through the per-port MAC interrupt. MIF is the only MAC
364 		 * sub-block that can post status here, and any MIF status is
365 		 * ignored; it is checked by the per-port timer instead.
366 		 */
367 	} else if (estat.bits.ldw.ipp) {
368 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
369 			"==> nxge_syserr_intr: device error - IPP"));
370 		(void) nxge_ipp_handle_sys_errors(nxgep);
371 	} else if (estat.bits.ldw.zcp) {
372 		/* ZCP */
373 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
374 			"==> nxge_syserr_intr: device error - ZCP"));
375 		(void) nxge_zcp_handle_sys_errors(nxgep);
376 	} else if (estat.bits.ldw.tdmc) {
377 		/* TDMC */
378 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
379 			"==> nxge_syserr_intr: device error - TDMC"));
380 		/*
381 		 * There are no TDMC system errors defined in the PRM. All TDMC
382 		 * channel-specific errors are reported on a per-channel basis.
383 		 */
384 	} else if (estat.bits.ldw.rdmc) {
385 		/* RDMC */
386 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
387 			"==> nxge_syserr_intr: device error - RDMC"));
388 		(void) nxge_rxdma_handle_sys_errors(nxgep);
389 	} else if (estat.bits.ldw.txc) {
390 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
391 			"==> nxge_syserr_intr: device error - TXC"));
392 		(void) nxge_txc_handle_sys_errors(nxgep);
393 	} else if ((nxgep->niu_type != N2_NIU) && estat.bits.ldw.peu) {
394 		/* PCI-E */
395 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
396 			"==> nxge_syserr_intr: device error - PCI-E"));
397 	} else if (estat.bits.ldw.meta1) {
398 		/* META1 */
399 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
400 			"==> nxge_syserr_intr: device error - META1"));
401 	} else if (estat.bits.ldw.meta2) {
402 		/* META2 */
403 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
404 			"==> nxge_syserr_intr: device error - META2"));
405 	} else if (estat.bits.ldw.fflp) {
406 		/* FFLP */
407 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
408 			"==> nxge_syserr_intr: device error - FFLP"));
409 		(void) nxge_fflp_handle_sys_errors(nxgep);
410 	}
411 
412 	if (nxgep->mac.portmode == PORT_10G_FIBER ||
413 	    nxgep->mac.portmode == PORT_10G_COPPER) {
414 		if (nxge_check_xaui_xfp(nxgep) != NXGE_OK) {
415 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
416 			    "==> nxge_syserr_intr: device error - XAUI"));
417 		}
418 	}
419 
420 	serviced = DDI_INTR_CLAIMED;
421 
422 	if (ldgp != NULL && ldvp != NULL && ldgp->nldvs == 1 &&
423 		!ldvp->use_timer) {
424 		(void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
425 			B_TRUE, ldgp->ldg_timer);
426 	}
427 	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "<== nxge_syserr_intr"));
428 	return (serviced);
429 }
430 
431 /* ARGSUSED */
432 void
433 nxge_intr_hw_enable(p_nxge_t nxgep)
434 {
435 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr_hw_enable"));
436 	(void) nxge_intr_mask_mgmt_set(nxgep, B_TRUE);
437 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr_hw_enable"));
438 }
439 
440 /* ARGSUSED */
441 void
442 nxge_intr_hw_disable(p_nxge_t nxgep)
443 {
444 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr_hw_disable"));
445 	(void) nxge_intr_mask_mgmt_set(nxgep, B_FALSE);
446 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr_hw_disable"));
447 }
448 
449 /* ARGSUSED */
450 void
451 nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count)
452 {
453 	p_nxge_t nxgep = (p_nxge_t)arg;
454 	uint8_t channel;
455 	npi_handle_t handle;
456 	p_nxge_ldgv_t ldgvp;
457 	p_nxge_ldv_t ldvp;
458 	int i;
459 
460 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_hw_blank"));
461 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
462 
463 	if ((ldgvp = nxgep->ldgvp) == NULL) {
464 		NXGE_ERROR_MSG((nxgep, INT_CTL,
465 			"<== nxge_rx_hw_blank (not enabled)"));
466 		return;
467 	}
468 	ldvp = nxgep->ldgvp->ldvp;
469 	if (ldvp == NULL) {
470 		return;
471 	}
472 	for (i = 0; i < ldgvp->nldvs; i++, ldvp++) {
473 		if (ldvp->is_rxdma) {
474 			channel = ldvp->channel;
475 			(void) npi_rxdma_cfg_rdc_rcr_threshold(handle,
476 				channel, count);
477 			(void) npi_rxdma_cfg_rdc_rcr_timeout(handle,
478 				channel, ticks);
479 		}
480 	}
481 
482 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_rx_hw_blank"));
483 }
484 
485 /* ARGSUSED */
486 void
487 nxge_hw_stop(p_nxge_t nxgep)
488 {
489 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_stop"));
490 
491 	(void) nxge_tx_mac_disable(nxgep);
492 	(void) nxge_rx_mac_disable(nxgep);
493 	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
494 	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
495 
496 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_stop"));
497 }
498 
499 /* ARGSUSED */
500 void
501 nxge_hw_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
502 {
503 	int cmd;
504 
505 	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_hw_ioctl"));
506 
507 	if (nxgep == NULL) {
508 		miocnak(wq, mp, 0, EINVAL);
509 		return;
510 	}
511 	iocp->ioc_error = 0;
512 	cmd = iocp->ioc_cmd;
513 
514 	switch (cmd) {
515 	default:
516 		miocnak(wq, mp, 0, EINVAL);
517 		return;
518 
519 	case NXGE_GET_MII:
520 		nxge_get_mii(nxgep, mp->b_cont);
521 		miocack(wq, mp, sizeof (uint16_t), 0);
522 		break;
523 
524 	case NXGE_PUT_MII:
525 		nxge_put_mii(nxgep, mp->b_cont);
526 		miocack(wq, mp, 0, 0);
527 		break;
528 
529 	case NXGE_GET64:
530 		nxge_get64(nxgep, mp->b_cont);
531 		miocack(wq, mp, sizeof (uint32_t), 0);
532 		break;
533 
534 	case NXGE_PUT64:
535 		nxge_put64(nxgep, mp->b_cont);
536 		miocack(wq, mp, 0, 0);
537 		break;
538 
539 	case NXGE_PUT_TCAM:
540 		nxge_put_tcam(nxgep, mp->b_cont);
541 		miocack(wq, mp, 0, 0);
542 		break;
543 
544 	case NXGE_GET_TCAM:
545 		nxge_get_tcam(nxgep, mp->b_cont);
546 		miocack(wq, mp, 0, 0);
547 		break;
548 
549 	case NXGE_TX_REGS_DUMP:
550 		nxge_txdma_regs_dump_channels(nxgep);
551 		miocack(wq, mp, 0, 0);
552 		break;
553 	case NXGE_RX_REGS_DUMP:
554 		nxge_rxdma_regs_dump_channels(nxgep);
555 		miocack(wq, mp, 0, 0);
556 		break;
557 	case NXGE_VIR_INT_REGS_DUMP:
558 	case NXGE_INT_REGS_DUMP:
559 		nxge_virint_regs_dump(nxgep);
560 		miocack(wq, mp, 0, 0);
561 		break;
562 	case NXGE_RTRACE:
563 		nxge_rtrace_ioctl(nxgep, wq, mp, iocp);
564 		break;
565 	}
566 }
567 
568 /* ARGSUSED */
569 void
570 nxge_loopback_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp,
571 	struct iocblk *iocp)
572 {
573 	p_lb_property_t lb_props;
574 
575 	size_t size;
576 	int i;
577 
578 	if (mp->b_cont == NULL) {
579 		miocnak(wq, mp, 0, EINVAL);
		return;
580 	}
581 	switch (iocp->ioc_cmd) {
582 	case LB_GET_MODE:
583 		NXGE_DEBUG_MSG((nxgep, IOC_CTL, "NXGE_GET_LB_MODE command"));
584 		if (nxgep != NULL) {
585 			*(lb_info_sz_t *)mp->b_cont->b_rptr =
586 				nxgep->statsp->port_stats.lb_mode;
587 			miocack(wq, mp, sizeof (nxge_lb_t), 0);
588 		} else {
589 			miocnak(wq, mp, 0, EINVAL);
590 		}
591 		break;
592 	case LB_SET_MODE:
593 		NXGE_DEBUG_MSG((nxgep, IOC_CTL, "NXGE_SET_LB_MODE command"));
594 		if (iocp->ioc_count != sizeof (uint32_t)) {
595 			miocack(wq, mp, 0, 0);
596 			break;
597 		}
598 		if ((nxgep != NULL) && nxge_set_lb(nxgep, wq, mp->b_cont)) {
599 			miocack(wq, mp, 0, 0);
600 		} else {
601 			miocnak(wq, mp, 0, EPROTO);
602 		}
603 		break;
604 	case LB_GET_INFO_SIZE:
605 		NXGE_DEBUG_MSG((nxgep, IOC_CTL, "LB_GET_INFO_SIZE command"));
606 		if (nxgep != NULL) {
607 			size = sizeof (lb_normal);
608 			if (nxgep->statsp->mac_stats.cap_10gfdx) {
609 				size += sizeof (lb_external10g);
610 				size += sizeof (lb_mac10g);
611 				/* Publish PHY loopback if PHY is present */
612 				if (nxgep->mac.portmode == PORT_10G_COPPER ||
613 				    nxgep->mac.portmode == PORT_10G_FIBER)
614 					size += sizeof (lb_phy10g);
615 			}
616 			if (nxgep->mac.portmode == PORT_10G_FIBER ||
617 			    nxgep->mac.portmode == PORT_10G_SERDES)
618 				size += sizeof (lb_serdes10g);
619 
620 			if (nxgep->statsp->mac_stats.cap_1000fdx) {
621 				size += sizeof (lb_external1000);
622 				size += sizeof (lb_mac1000);
623 				if ((nxgep->mac.portmode == PORT_1G_COPPER) ||
624 				    (nxgep->mac.portmode ==
625 				    PORT_1G_RGMII_FIBER))
626 					size += sizeof (lb_phy1000);
627 			}
628 			if (nxgep->statsp->mac_stats.cap_100fdx)
629 				size += sizeof (lb_external100);
630 			if (nxgep->statsp->mac_stats.cap_10fdx)
631 				size += sizeof (lb_external10);
632 			if (nxgep->mac.portmode == PORT_1G_FIBER ||
633 			    nxgep->mac.portmode == PORT_1G_SERDES)
634 				size += sizeof (lb_serdes1000);
635 
636 			*(lb_info_sz_t *)mp->b_cont->b_rptr = size;
637 
638 			NXGE_DEBUG_MSG((nxgep, IOC_CTL,
639 				"NXGE_GET_LB_INFO command: size %d", size));
640 			miocack(wq, mp, sizeof (lb_info_sz_t), 0);
641 		} else
642 			miocnak(wq, mp, 0, EINVAL);
643 		break;
644 
645 	case LB_GET_INFO:
646 		NXGE_DEBUG_MSG((nxgep, IOC_CTL, "NXGE_GET_LB_INFO command"));
647 		if (nxgep != NULL) {
648 			size = sizeof (lb_normal);
649 			if (nxgep->statsp->mac_stats.cap_10gfdx) {
650 				size += sizeof (lb_external10g);
651 				size += sizeof (lb_mac10g);
652 				/* Publish PHY loopback if PHY is present */
653 				if (nxgep->mac.portmode == PORT_10G_COPPER ||
654 				    nxgep->mac.portmode == PORT_10G_FIBER)
655 					size += sizeof (lb_phy10g);
656 			}
657 			if (nxgep->mac.portmode == PORT_10G_FIBER ||
658 			    nxgep->mac.portmode == PORT_10G_SERDES)
659 				size += sizeof (lb_serdes10g);
660 
661 			if (nxgep->statsp->mac_stats.cap_1000fdx) {
662 				size += sizeof (lb_external1000);
663 				size += sizeof (lb_mac1000);
664 				if ((nxgep->mac.portmode == PORT_1G_COPPER) ||
665 				    (nxgep->mac.portmode ==
666 				    PORT_1G_RGMII_FIBER))
667 					size += sizeof (lb_phy1000);
668 			}
669 			if (nxgep->statsp->mac_stats.cap_100fdx)
670 				size += sizeof (lb_external100);
671 			if (nxgep->statsp->mac_stats.cap_10fdx)
672 				size += sizeof (lb_external10);
673 			if (nxgep->mac.portmode == PORT_1G_FIBER ||
674 			    nxgep->mac.portmode == PORT_1G_SERDES)
675 				size += sizeof (lb_serdes1000);
676 
677 			NXGE_DEBUG_MSG((nxgep, IOC_CTL,
678 				"NXGE_GET_LB_INFO command: size %d", size));
679 			if (size == iocp->ioc_count) {
680 				i = 0;
681 				lb_props = (p_lb_property_t)mp->b_cont->b_rptr;
682 				lb_props[i++] = lb_normal;
683 				if (nxgep->statsp->mac_stats.cap_10gfdx) {
684 					lb_props[i++] = lb_mac10g;
685 					if (nxgep->mac.portmode ==
686 					    PORT_10G_COPPER ||
687 					    nxgep->mac.portmode ==
688 					    PORT_10G_FIBER)
689 						lb_props[i++] = lb_phy10g;
690 					lb_props[i++] = lb_external10g;
691 				}
692 				if (nxgep->mac.portmode == PORT_10G_FIBER ||
693 				    nxgep->mac.portmode == PORT_10G_SERDES)
694 					lb_props[i++] = lb_serdes10g;
695 
696 				if (nxgep->statsp->mac_stats.cap_1000fdx)
697 					lb_props[i++] = lb_external1000;
698 				if (nxgep->statsp->mac_stats.cap_100fdx)
699 					lb_props[i++] = lb_external100;
700 				if (nxgep->statsp->mac_stats.cap_10fdx)
701 					lb_props[i++] = lb_external10;
702 				if (nxgep->statsp->mac_stats.cap_1000fdx)
703 					lb_props[i++] = lb_mac1000;
704 				if ((nxgep->mac.portmode == PORT_1G_COPPER) ||
705 				    (nxgep->mac.portmode ==
706 				    PORT_1G_RGMII_FIBER)) {
707 					if (nxgep->statsp->mac_stats.
708 						cap_1000fdx)
709 						lb_props[i++] = lb_phy1000;
710 				} else if ((nxgep->mac.portmode ==
711 				    PORT_1G_FIBER) ||
712 				    (nxgep->mac.portmode == PORT_1G_SERDES)) {
713 					lb_props[i++] = lb_serdes1000;
714 				}
715 				miocack(wq, mp, size, 0);
716 			} else
717 				miocnak(wq, mp, 0, EINVAL);
718 		} else {
719 			miocnak(wq, mp, 0, EINVAL);
720 			cmn_err(CE_NOTE, "!nxge_loopback_ioctl: invalid command 0x%x",
721 				iocp->ioc_cmd);
722 		}
723 		break;
724 	}
725 }
726 
727 /*
728  * DMA channel interfaces to access various channel specific
729  * hardware functions.
730  */
731 /* ARGSUSED */
732 void
733 nxge_rxdma_channel_put64(nxge_os_acc_handle_t handle, void *reg_addrp,
734 	uint32_t reg_base, uint16_t channel, uint64_t reg_data)
735 {
736 	uint64_t reg_offset;
737 
738 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_channel_put64"));
739 
740 	/*
741 	 * The channel is assumed to range from 0 to the maximum DMA channel
742 	 * number. If the virtual DMA CSR address space from the config space
743 	 * is used instead (in the PCI case), the code below needs to use a
744 	 * different offset-computation macro.
745 	 */
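	/*
	 * Worked example (illustrative only, assuming DMC_OFFSET() scales
	 * the channel number by the per-channel CSR stride): for channel 3,
	 *
	 *	reg_offset = reg_base + DMC_OFFSET(3);
	 *
	 * and the 64-bit datum is then written with a single PIO below.
	 */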
746 	reg_offset = reg_base + DMC_OFFSET(channel);
747 	NXGE_PIO_WRITE64(handle, reg_addrp, reg_offset, reg_data);
748 
749 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_channel_put64"));
750 }
751 
752 /* ARGSUSED */
753 uint64_t
754 nxge_rxdma_channel_get64(nxge_os_acc_handle_t handle, void *reg_addrp,
755 	uint32_t reg_base, uint16_t channel)
756 {
757 	uint64_t reg_offset;
758 
759 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_channel_get64"));
760 
761 	/*
762 	 * The channel is assumed to range from 0 to the maximum DMA channel
763 	 * number. If the virtual DMA CSR address space from the config space
764 	 * is used instead (in the PCI case), the code below needs to use a
765 	 * different offset-computation macro.
766 	 */
767 	reg_offset = reg_base + DMC_OFFSET(channel);
768 
769 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_channel_get64"));
770 
771 	return (NXGE_PIO_READ64(handle, reg_addrp, reg_offset));
772 }
773 
774 /* ARGSUSED */
775 void
776 nxge_get32(p_nxge_t nxgep, p_mblk_t mp)
777 {
778 	nxge_os_acc_handle_t nxge_regh;
779 
780 	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_get32"));
781 	nxge_regh = nxgep->dev_regs->nxge_regh;
782 
783 	*(uint32_t *)mp->b_rptr = NXGE_PIO_READ32(nxge_regh,
784 		nxgep->dev_regs->nxge_regp, *(uint32_t *)mp->b_rptr);
785 
786 	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "value = 0x%08X",
787 		*(uint32_t *)mp->b_rptr));
788 	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_get32"));
789 }
790 
791 /* ARGSUSED */
792 void
793 nxge_put32(p_nxge_t nxgep, p_mblk_t mp)
794 {
795 	nxge_os_acc_handle_t nxge_regh;
796 	uint32_t *buf;
797 	uint8_t *reg;
798 
799 	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_put32"));
800 	nxge_regh = nxgep->dev_regs->nxge_regh;
801 
802 	buf = (uint32_t *)mp->b_rptr;
803 	reg = (uint8_t *)(nxgep->dev_regs->nxge_regp) + buf[0];
804 	NXGE_DEBUG_MSG((nxgep, IOC_CTL,
805 		"reg = 0x%016llX index = 0x%08X value = 0x%08X",
806 		reg, buf[0], buf[1]));
807 	NXGE_PIO_WRITE32(nxge_regh, (uint32_t *)reg, 0, buf[1]);
808 	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_put32"));
809 }
810 
811 /*ARGSUSED*/
812 boolean_t
813 nxge_set_lb(p_nxge_t nxgep, queue_t *wq, p_mblk_t mp)
814 {
815 	boolean_t status = B_TRUE;
816 	uint32_t lb_mode;
817 	lb_property_t *lb_info;
818 
819 	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_set_lb"));
820 	lb_mode = nxgep->statsp->port_stats.lb_mode;
821 	if (lb_mode == *(uint32_t *)mp->b_rptr) {
822 		cmn_err(CE_NOTE,
823 			"!nxge%d: Loopback mode already set (lb_mode %d).\n",
824 			nxgep->instance, lb_mode);
825 		status = B_FALSE;
826 		goto nxge_set_lb_exit;
827 	}
828 	lb_mode = *(uint32_t *)mp->b_rptr;
829 	lb_info = NULL;
830 	if (lb_mode == lb_normal.value)
831 		lb_info = &lb_normal;
832 	else if ((lb_mode == lb_external10g.value) &&
833 		(nxgep->statsp->mac_stats.cap_10gfdx))
834 		lb_info = &lb_external10g;
835 	else if ((lb_mode == lb_external1000.value) &&
836 		(nxgep->statsp->mac_stats.cap_1000fdx))
837 		lb_info = &lb_external1000;
838 	else if ((lb_mode == lb_external100.value) &&
839 		(nxgep->statsp->mac_stats.cap_100fdx))
840 		lb_info = &lb_external100;
841 	else if ((lb_mode == lb_external10.value) &&
842 		(nxgep->statsp->mac_stats.cap_10fdx))
843 		lb_info = &lb_external10;
844 	else if ((lb_mode == lb_phy10g.value) &&
845 			((nxgep->mac.portmode == PORT_10G_COPPER) ||
846 			(nxgep->mac.portmode == PORT_10G_FIBER)))
847 		lb_info = &lb_phy10g;
848 	else if ((lb_mode == lb_phy1000.value) &&
849 	    ((nxgep->mac.portmode == PORT_1G_COPPER) ||
850 	    (nxgep->mac.portmode == PORT_1G_RGMII_FIBER)))
851 		lb_info = &lb_phy1000;
852 	else if ((lb_mode == lb_phy.value) &&
853 		(nxgep->mac.portmode == PORT_1G_COPPER))
854 		lb_info = &lb_phy;
855 	else if ((lb_mode == lb_serdes10g.value) &&
856 	    ((nxgep->mac.portmode == PORT_10G_FIBER) ||
857 	    (nxgep->mac.portmode == PORT_10G_COPPER) ||
858 	    (nxgep->mac.portmode == PORT_10G_SERDES)))
859 		lb_info = &lb_serdes10g;
860 	else if ((lb_mode == lb_serdes1000.value) &&
861 	    (nxgep->mac.portmode == PORT_1G_FIBER ||
862 	    (nxgep->mac.portmode == PORT_1G_SERDES)))
863 		lb_info = &lb_serdes1000;
864 	else if (lb_mode == lb_mac10g.value)
865 		lb_info = &lb_mac10g;
866 	else if (lb_mode == lb_mac1000.value)
867 		lb_info = &lb_mac1000;
868 	else if (lb_mode == lb_mac.value)
869 		lb_info = &lb_mac;
870 	else {
871 		cmn_err(CE_NOTE,
872 			"!nxge%d: Loopback mode not supported (mode %d).\n",
873 			nxgep->instance, lb_mode);
874 		status = B_FALSE;
875 		goto nxge_set_lb_exit;
876 	}
877 
878 	if (lb_mode == nxge_lb_normal) {
879 		if (nxge_lb_dbg) {
880 			cmn_err(CE_NOTE,
881 				"!nxge%d: Returning to normal operation",
882 				nxgep->instance);
883 		}
884 		nxge_set_lb_normal(nxgep);
885 		goto nxge_set_lb_exit;
886 	}
887 	nxgep->statsp->port_stats.lb_mode = lb_mode;
888 
889 	if (nxge_lb_dbg)
890 		cmn_err(CE_NOTE,
891 			"!nxge%d: Adapter now in %s loopback mode",
892 			nxgep->instance, lb_info->key);
893 	nxgep->param_arr[param_autoneg].value = 0;
894 	nxgep->param_arr[param_anar_10gfdx].value =
895 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10g) ||
896 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_mac10g) ||
897 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_phy10g) ||
898 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_serdes10g);
899 	nxgep->param_arr[param_anar_10ghdx].value = 0;
900 	nxgep->param_arr[param_anar_1000fdx].value =
901 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_ext1000) ||
902 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_mac1000) ||
903 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_phy1000) ||
904 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_serdes1000);
905 	nxgep->param_arr[param_anar_1000hdx].value = 0;
906 	nxgep->param_arr[param_anar_100fdx].value =
907 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_phy) ||
908 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_mac) ||
909 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_ext100);
910 	nxgep->param_arr[param_anar_100hdx].value = 0;
911 	nxgep->param_arr[param_anar_10fdx].value =
912 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_mac) ||
913 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10);
914 	if (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext1000) {
915 		nxgep->param_arr[param_master_cfg_enable].value = 1;
916 		nxgep->param_arr[param_master_cfg_value].value = 1;
917 	}
918 	if ((nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10g) ||
919 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_ext1000) ||
920 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_ext100) ||
921 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10) ||
922 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_phy10g) ||
923 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_phy1000) ||
924 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_phy)) {
925 
926 		(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
927 		(void) nxge_xcvr_find(nxgep);
928 		(void) nxge_link_init(nxgep);
929 		(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
930 	}
931 	if (lb_info->lb_type == internal) {
932 		if ((nxgep->statsp->port_stats.lb_mode == nxge_lb_mac10g) ||
933 				(nxgep->statsp->port_stats.lb_mode ==
934 				nxge_lb_phy10g) ||
935 				(nxgep->statsp->port_stats.lb_mode ==
936 				nxge_lb_serdes10g)) {
937 			nxgep->statsp->mac_stats.link_speed = 10000;
938 		} else if ((nxgep->statsp->port_stats.lb_mode
939 				== nxge_lb_mac1000) ||
940 				(nxgep->statsp->port_stats.lb_mode ==
941 				nxge_lb_phy1000) ||
942 				(nxgep->statsp->port_stats.lb_mode ==
943 				nxge_lb_serdes1000)) {
944 			nxgep->statsp->mac_stats.link_speed = 1000;
945 		} else {
946 			nxgep->statsp->mac_stats.link_speed = 100;
947 		}
948 		nxgep->statsp->mac_stats.link_duplex = 2;
949 		nxgep->statsp->mac_stats.link_up = 1;
950 	}
951 	nxge_global_reset(nxgep);
952 
953 nxge_set_lb_exit:
954 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
955 		"<== nxge_set_lb status = 0x%08x", status));
956 	return (status);
957 }
958 
959 /* ARGSUSED */
960 void
961 nxge_set_lb_normal(p_nxge_t nxgep)
962 {
963 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_lb_normal"));
964 	nxgep->statsp->port_stats.lb_mode = nxge_lb_normal;
965 	nxgep->param_arr[param_autoneg].value =
966 		nxgep->param_arr[param_autoneg].old_value;
967 	nxgep->param_arr[param_anar_1000fdx].value =
968 		nxgep->param_arr[param_anar_1000fdx].old_value;
969 	nxgep->param_arr[param_anar_1000hdx].value =
970 		nxgep->param_arr[param_anar_1000hdx].old_value;
971 	nxgep->param_arr[param_anar_100fdx].value =
972 		nxgep->param_arr[param_anar_100fdx].old_value;
973 	nxgep->param_arr[param_anar_100hdx].value =
974 		nxgep->param_arr[param_anar_100hdx].old_value;
975 	nxgep->param_arr[param_anar_10fdx].value =
976 		nxgep->param_arr[param_anar_10fdx].old_value;
977 	nxgep->param_arr[param_master_cfg_enable].value =
978 		nxgep->param_arr[param_master_cfg_enable].old_value;
979 	nxgep->param_arr[param_master_cfg_value].value =
980 		nxgep->param_arr[param_master_cfg_value].old_value;
981 
982 	nxge_global_reset(nxgep);
983 
984 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
985 	(void) nxge_xcvr_find(nxgep);
986 	(void) nxge_link_init(nxgep);
987 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
988 
989 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_lb_normal"));
990 }
991 
992 /* ARGSUSED */
993 void
994 nxge_get_mii(p_nxge_t nxgep, p_mblk_t mp)
995 {
996 	uint16_t reg;
997 
998 	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_get_mii"));
999 
1000 	reg = *(uint16_t *)mp->b_rptr;
1001 	(void) nxge_mii_read(nxgep, nxgep->statsp->mac_stats.xcvr_portn, reg,
1002 		(uint16_t *)mp->b_rptr);
1003 	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "reg = 0x%08X value = 0x%04X",
1004 		reg, *(uint16_t *)mp->b_rptr));
1005 	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_get_mii"));
1006 }
1007 
1008 /* ARGSUSED */
1009 void
1010 nxge_put_mii(p_nxge_t nxgep, p_mblk_t mp)
1011 {
1012 	uint16_t *buf;
1013 	uint8_t reg;
1014 
1015 	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_put_mii"));
1016 	buf = (uint16_t *)mp->b_rptr;
1017 	reg = (uint8_t)buf[0];
1018 	NXGE_DEBUG_MSG((nxgep, IOC_CTL,
1019 		"reg = 0x%08X index = 0x%08X value = 0x%08X",
1020 		reg, buf[0], buf[1]));
1021 	(void) nxge_mii_write(nxgep, nxgep->statsp->mac_stats.xcvr_portn,
1022 		reg, buf[1]);
1023 	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_put_mii"));
1024 }
1025 
1026 /* ARGSUSED */
1027 void
1028 nxge_check_hw_state(p_nxge_t nxgep)
1029 {
1030 	p_nxge_ldgv_t ldgvp;
1031 	p_nxge_ldv_t t_ldvp;
1032 
1033 	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "==> nxge_check_hw_state"));
1034 
1035 	MUTEX_ENTER(nxgep->genlock);
1036 	nxgep->nxge_timerid = 0;
1037 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1038 		goto nxge_check_hw_state_exit;
1039 	}
1040 	nxge_check_tx_hang(nxgep);
1041 
1042 	ldgvp = nxgep->ldgvp;
1043 	if (ldgvp == NULL || (ldgvp->ldvp_syserr == NULL)) {
1044 		NXGE_ERROR_MSG((nxgep, SYSERR_CTL, "<== nxge_check_hw_state: "
1045 				"NULL ldgvp (interrupt not ready)."));
1046 		goto nxge_check_hw_state_exit;
1047 	}
1048 	t_ldvp = ldgvp->ldvp_syserr;
1049 	if (!t_ldvp->use_timer) {
1050 		NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "<== nxge_check_hw_state: "
1051 				"ldgvp $%p t_ldvp $%p use_timer flag %d",
1052 				ldgvp, t_ldvp, t_ldvp->use_timer));
1053 		goto nxge_check_hw_state_exit;
1054 	}
1055 	if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
1056 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1057 			"port%d Bad register acc handle", nxgep->mac.portnum));
1058 	}
1059 	(void) nxge_syserr_intr((void *) t_ldvp, (void *) nxgep);
1060 
1061 	nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state,
1062 		NXGE_CHECK_TIMER);
1063 
1064 nxge_check_hw_state_exit:
1065 	MUTEX_EXIT(nxgep->genlock);
1066 	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "<== nxge_check_hw_state"));
1067 }
1068 
1069 /*ARGSUSED*/
1070 static void
1071 nxge_rtrace_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp,
1072 	struct iocblk *iocp)
1073 {
1074 	ssize_t size;
1075 	rtrace_t *rtp;
1076 	mblk_t *nmp;
1077 	uint32_t i, j;
1078 	uint32_t start_blk;
1079 	uint32_t base_entry;
1080 	uint32_t num_entries;
1081 
1082 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_rtrace_ioctl"));
1083 
1084 	size = 1024;
1085 	if (mp->b_cont == NULL || MBLKL(mp->b_cont) < size) {
1086 		NXGE_DEBUG_MSG((nxgep, STR_CTL,
1087 				"malformed M_IOCTL MBLKL = %d size = %d",
1088 				MBLKL(mp->b_cont), size));
1089 		miocnak(wq, mp, 0, EINVAL);
1090 		return;
1091 	}
1092 	nmp = mp->b_cont;
1093 	rtp = (rtrace_t *)nmp->b_rptr;
1094 	start_blk = rtp->next_idx;
1095 	num_entries = rtp->last_idx;
1096 	base_entry = start_blk * MAX_RTRACE_IOC_ENTRIES;
1097 
1098 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "start_blk = %d\n", start_blk));
1099 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "num_entries = %d\n", num_entries));
1100 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "base_entry = %d\n", base_entry));
1101 
1102 	rtp->next_idx = npi_rtracebuf.next_idx;
1103 	rtp->last_idx = npi_rtracebuf.last_idx;
1104 	rtp->wrapped = npi_rtracebuf.wrapped;
1105 	for (i = 0, j = base_entry; i < num_entries; i++, j++) {
1106 		rtp->buf[i].ctl_addr = npi_rtracebuf.buf[j].ctl_addr;
1107 		rtp->buf[i].val_l32 = npi_rtracebuf.buf[j].val_l32;
1108 		rtp->buf[i].val_h32 = npi_rtracebuf.buf[j].val_h32;
1109 	}
1110 
1111 	nmp->b_wptr = nmp->b_rptr + size;
1112 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_rtrace_ioctl"));
1113 	miocack(wq, mp, (int)size, 0);
1114 }
1115