xref: /titanic_50/usr/src/uts/common/io/nxge/nxge_hw.c (revision a60fc142342386d0b786e65fba901234400d7020)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/nxge/nxge_impl.h>
29 
30 /*
31  * Tunable Receive Completion Ring Configuration B parameters.
32  */
33 uint16_t nxge_rx_pkt_thres;	/* 16 bits */
34 uint8_t nxge_rx_pkt_timeout;	/* 6 bits based on DMA clock divider */
35 
36 lb_property_t lb_normal = {normal, "normal", nxge_lb_normal};
37 lb_property_t lb_external10g = {external, "external10g", nxge_lb_ext10g};
38 lb_property_t lb_external1000 = {external, "external1000", nxge_lb_ext1000};
39 lb_property_t lb_external100 = {external, "external100", nxge_lb_ext100};
40 lb_property_t lb_external10 = {external, "external10", nxge_lb_ext10};
41 lb_property_t lb_phy10g = {internal, "phy10g", nxge_lb_phy10g};
42 lb_property_t lb_phy1000 = {internal, "phy1000", nxge_lb_phy1000};
43 lb_property_t lb_phy = {internal, "phy", nxge_lb_phy};
44 lb_property_t lb_serdes10g = {internal, "serdes10g", nxge_lb_serdes10g};
45 lb_property_t lb_serdes1000 = {internal, "serdes", nxge_lb_serdes1000};
46 lb_property_t lb_mac10g = {internal, "mac10g", nxge_lb_mac10g};
47 lb_property_t lb_mac1000 = {internal, "mac1000", nxge_lb_mac1000};
48 lb_property_t lb_mac = {internal, "mac10/100", nxge_lb_mac};
49 
50 uint32_t nxge_lb_dbg = 1;
51 void nxge_get_mii(p_nxge_t nxgep, p_mblk_t mp);
52 void nxge_put_mii(p_nxge_t nxgep, p_mblk_t mp);
53 
54 extern uint32_t nxge_rx_mode;
55 extern uint32_t nxge_jumbo_mtu;
56 extern boolean_t nxge_jumbo_enable;
57 
58 static void
59 nxge_rtrace_ioctl(p_nxge_t, queue_t *, mblk_t *, struct iocblk *);
60 
/* ARGSUSED */
/*
 * nxge_global_reset - quiesce and re-initialize the port hardware.
 *
 * Stops link monitoring, masks hardware interrupts, optionally re-runs
 * link initialization, then restarts monitoring, re-initializes the MAC
 * and unmasks interrupts.
 */
void
nxge_global_reset(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_global_reset"));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	(void) nxge_intr_hw_disable(nxgep);

	/*
	 * Re-run link initialization when resuming, or when the port is
	 * in a PHY/SerDes internal loopback mode (those modes bypass the
	 * regular link bring-up path).
	 */
	if ((nxgep->suspended) ||
			((nxgep->statsp->port_stats.lb_mode ==
			nxge_lb_phy1000) ||
			(nxgep->statsp->port_stats.lb_mode ==
			nxge_lb_phy10g) ||
			(nxgep->statsp->port_stats.lb_mode ==
			nxge_lb_serdes1000) ||
			(nxgep->statsp->port_stats.lb_mode ==
			nxge_lb_serdes10g))) {
		(void) nxge_link_init(nxgep);
	}
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	(void) nxge_mac_init(nxgep);
	(void) nxge_intr_hw_enable(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_global_reset"));
}
87 
88 /* ARGSUSED */
89 void
90 nxge_hw_id_init(p_nxge_t nxgep)
91 {
92 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_id_init"));
93 	/*
94 	 * Set up initial hardware parameters required such as mac mtu size.
95 	 */
96 	nxgep->mac.is_jumbo = B_FALSE;
97 	nxgep->mac.maxframesize = NXGE_MTU_DEFAULT_MAX;	/* 1522 */
98 	if (nxgep->param_arr[param_accept_jumbo].value || nxge_jumbo_enable) {
99 		nxgep->mac.maxframesize = (uint16_t)nxge_jumbo_mtu;
100 		nxgep->mac.is_jumbo = B_TRUE;
101 	}
102 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
103 		"==> nxge_hw_id_init: maxframesize %d",
104 		nxgep->mac.maxframesize));
105 
106 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_id_init"));
107 }
108 
/* ARGSUSED */
/*
 * nxge_hw_init_niu_common - one-time initialization of hardware blocks
 * shared by all functions (ports) on the device.
 *
 * The first function to arrive performs the common init (currently an
 * FFLP hardware reset); later callers observe COMMON_INIT_DONE under
 * nxge_cfg_lock and return immediately.
 */
void
nxge_hw_init_niu_common(p_nxge_t nxgep)
{
	p_nxge_hw_list_t hw_p;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_init_niu_common"));

	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
		return;
	}
	/* Serialize against sibling functions sharing this device. */
	MUTEX_ENTER(&hw_p->nxge_cfg_lock);
	if (hw_p->flags & COMMON_INIT_DONE) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			"nxge_hw_init_niu_common"
			" already done for dip $%p function %d exiting",
			hw_p->parent_devp, nxgep->function_num));
		MUTEX_EXIT(&hw_p->nxge_cfg_lock);
		return;
	}

	hw_p->flags = COMMON_INIT_START;
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxge_hw_init_niu_common"
		" Started for device id %x with function %d",
		hw_p->parent_devp, nxgep->function_num));

	/* per neptune common block init */
	(void) nxge_fflp_hw_reset(nxgep);

	hw_p->flags = COMMON_INIT_DONE;
	MUTEX_EXIT(&hw_p->nxge_cfg_lock);

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxge_hw_init_niu_common"
		" Done for device id %x with function %d",
		hw_p->parent_devp, nxgep->function_num));
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_init_niu_common"));
}
146 
/* ARGSUSED */
/*
 * nxge_intr - main interrupt service routine.
 *
 * arg1 is the logical device (ldv) registered for this vector; arg2 is
 * the driver soft state (may be NULL on some platforms, see below).
 * Reads the flag registers of each logical group, dispatches the
 * per-device handlers whose flag bits are set, then rearms the groups
 * that were serviced.
 *
 * Returns DDI_INTR_CLAIMED once the hardware is initialized and the
 * logical-device tables are in place, DDI_INTR_UNCLAIMED otherwise.
 */
uint_t
nxge_intr(void *arg1, void *arg2)
{
	p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
	p_nxge_t nxgep = (p_nxge_t)arg2;
	uint_t serviced = DDI_INTR_UNCLAIMED;
	uint8_t ldv;
	npi_handle_t handle;
	p_nxge_ldgv_t ldgvp;
	p_nxge_ldg_t ldgp, t_ldgp;
	p_nxge_ldv_t t_ldvp;
	uint64_t vector0 = 0, vector1 = 0, vector2 = 0;
	int i, j, nldvs, nintrs = 1;
	npi_status_t rs = NPI_SUCCESS;

	/* DDI interface returns second arg as NULL (n2 niumx driver) !!! */
	if (arg2 == NULL || (void *) ldvp->nxgep != arg2) {
		nxgep = ldvp->nxgep;
	}
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr"));

	/* Don't touch hardware before initialization has completed. */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_ERROR_MSG((nxgep, INT_CTL,
			"<== nxge_intr: not initialized 0x%x", serviced));
		return (serviced);
	}

	ldgvp = nxgep->ldgvp;
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: ldgvp $%p", ldgvp));
	if (ldvp == NULL && ldgvp) {
		t_ldvp = ldvp = ldgvp->ldvp;
	}
	if (ldvp) {
		ldgp = t_ldgp = ldvp->ldgp;
	}
	/*
	 * NOTE(review): if ldvp is still NULL here, ldgp is passed to the
	 * debug message below uninitialized (DEBUG builds only) -- the
	 * NULL check follows the message; confirm and consider reordering.
	 */
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: "
		"ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp));
	if (ldgvp == NULL || ldvp == NULL || ldgp == NULL) {
		NXGE_ERROR_MSG((nxgep, INT_CTL, "==> nxge_intr: "
			"ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp));
		NXGE_ERROR_MSG((nxgep, INT_CTL, "<== nxge_intr: not ready"));
		return (DDI_INTR_UNCLAIMED);
	}
	/*
	 * This interrupt handler will have to go through all the logical
	 * devices to find out which logical device interrupts us and then call
	 * its handler to process the events.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	t_ldgp = ldgp;
	t_ldvp = ldgp->ldvp;

	nldvs = ldgp->nldvs;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: #ldvs %d #intrs %d",
			nldvs, ldgvp->ldg_intrs));

	serviced = DDI_INTR_CLAIMED;
	for (i = 0; i < nintrs; i++, t_ldgp++) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr(%d): #ldvs %d "
				" #intrs %d", i, nldvs, nintrs));
		/* Get this group's flag bits.  */
		t_ldgp->interrupted = B_FALSE;
		rs = npi_ldsv_ldfs_get(handle, t_ldgp->ldg,
			&vector0, &vector1, &vector2);
		if (rs) {
			continue;
		}
		if (!vector0 && !vector1 && !vector2) {
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: "
				"no interrupts on group %d", t_ldgp->ldg));
			continue;
		}
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: "
			"vector0 0x%llx vector1 0x%llx vector2 0x%llx",
			vector0, vector1, vector2));
		t_ldgp->interrupted = B_TRUE;
		nldvs = t_ldgp->nldvs;
		for (j = 0; j < nldvs; j++, t_ldvp++) {
			/*
			 * Call device's handler if flag bits are on.
			 */
			ldv = t_ldvp->ldv;
			/*
			 * Devices below NXGE_MAC_LD_START use vector0/1;
			 * MAC devices at or above it use vector2.
			 */
			if (((ldv < NXGE_MAC_LD_START) &&
					(LDV_ON(ldv, vector0) |
					(LDV_ON(ldv, vector1)))) ||
					(ldv >= NXGE_MAC_LD_START &&
					((LDV2_ON_1(ldv, vector2)) ||
					(LDV2_ON_2(ldv, vector2))))) {
				(void) (t_ldvp->ldv_intr_handler)(
					(caddr_t)t_ldvp, arg2);
				NXGE_DEBUG_MSG((nxgep, INT_CTL,
					"==> nxge_intr: "
					"calling device %d #ldvs %d #intrs %d",
					j, nldvs, nintrs));
			}
		}
	}

	/* Rearm only the groups whose flag bits were serviced above. */
	t_ldgp = ldgp;
	for (i = 0; i < nintrs; i++, t_ldgp++) {
		/* rearm group interrupts */
		if (t_ldgp->interrupted) {
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: arm "
				"group %d", t_ldgp->ldg));
			(void) npi_intr_ldg_mgmt_set(handle, t_ldgp->ldg,
				t_ldgp->arm, t_ldgp->ldg_timer);
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr: serviced 0x%x",
		serviced));
	return (serviced);
}
262 
/* ARGSUSED */
/*
 * nxge_syserr_intr - system error interrupt handler.
 *
 * Invoked either from a registered interrupt vector (arg1 = ldv) or
 * from the nxge_check_hw_state() timer when the function uses
 * timer-based error polling.  Reads the system error status register
 * and dispatches to the per-block error handlers (IPP, ZCP, RDMC, TXC,
 * FFLP); the remaining blocks are logged only.
 *
 * Returns DDI_INTR_CLAIMED unless the arguments or logical group are
 * not usable.
 */
uint_t
nxge_syserr_intr(void *arg1, void *arg2)
{
	p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
	p_nxge_t nxgep = (p_nxge_t)arg2;
	p_nxge_ldg_t ldgp = NULL;
	npi_handle_t handle;
	sys_err_stat_t estat;
	uint_t serviced = DDI_INTR_UNCLAIMED;

	if (arg1 == NULL && arg2 == NULL) {
		return (serviced);
	}
	/* Recover the soft state from the ldv when arg2 is unusable. */
	if (arg2 == NULL || ((ldvp != NULL && (void *) ldvp->nxgep != arg2))) {
		if (ldvp != NULL) {
			nxgep = ldvp->nxgep;
		}
	}
	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL,
		"==> nxge_syserr_intr: arg2 $%p arg1 $%p", nxgep, ldvp));
	if (ldvp != NULL && ldvp->use_timer == B_FALSE) {
		ldgp = ldvp->ldgp;
		if (ldgp == NULL) {
			NXGE_ERROR_MSG((nxgep, SYSERR_CTL,
				"<== nxge_syserrintr(no logical group): "
				"arg2 $%p arg1 $%p", nxgep, ldvp));
			return (DDI_INTR_UNCLAIMED);
		}
		/*
		 * Get the logical device state if the function uses interrupt.
		 */
	}

	/* This interrupt handler is for system error interrupts.  */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	estat.value = 0;
	(void) npi_fzc_sys_err_stat_get(handle, &estat);
	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL,
		"==> nxge_syserr_intr: device error 0x%016llx", estat.value));

	if (estat.bits.ldw.smx) {
		/* SMX */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"==> nxge_syserr_intr: device error - SMX"));
	} else if (estat.bits.ldw.mac) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"==> nxge_syserr_intr: device error - MAC"));
		/*
		 * There is nothing to be done here. All MAC errors go to per
		 * MAC port interrupt. MIF interrupt is the only MAC sub-block
		 * that can generate status here. MIF status reported will be
		 * ignored here. It is checked by per port timer instead.
		 */
	} else if (estat.bits.ldw.ipp) {
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
			"==> nxge_syserr_intr: device error - IPP"));
		(void) nxge_ipp_handle_sys_errors(nxgep);
	} else if (estat.bits.ldw.zcp) {
		/* ZCP */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"==> nxge_syserr_intr: device error - ZCP"));
		(void) nxge_zcp_handle_sys_errors(nxgep);
	} else if (estat.bits.ldw.tdmc) {
		/* TDMC */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"==> nxge_syserr_intr: device error - TDMC"));
		/*
		 * There is no TDMC system errors defined in the PRM. All TDMC
		 * channel specific errors are reported on a per channel basis.
		 */
	} else if (estat.bits.ldw.rdmc) {
		/* RDMC */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"==> nxge_syserr_intr: device error - RDMC"));
		(void) nxge_rxdma_handle_sys_errors(nxgep);
	} else if (estat.bits.ldw.txc) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"==> nxge_syserr_intr: device error - TXC"));
		(void) nxge_txc_handle_sys_errors(nxgep);
	} else if ((nxgep->niu_type != N2_NIU) && estat.bits.ldw.peu) {
		/* PCI-E */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"==> nxge_syserr_intr: device error - PCI-E"));
	} else if (estat.bits.ldw.meta1) {
		/* META1 */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"==> nxge_syserr_intr: device error - META1"));
	} else if (estat.bits.ldw.meta2) {
		/* META2 */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"==> nxge_syserr_intr: device error - META2"));
	} else if (estat.bits.ldw.fflp) {
		/* FFLP */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"==> nxge_syserr_intr: device error - FFLP"));
		(void) nxge_fflp_handle_sys_errors(nxgep);
	}
	serviced = DDI_INTR_CLAIMED;

	/* Rearm the group only when truly interrupt-driven (not polled). */
	if (ldgp != NULL && ldvp != NULL && ldgp->nldvs == 1 &&
		!ldvp->use_timer) {
		(void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
			B_TRUE, ldgp->ldg_timer);
	}
	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "<== nxge_syserr_intr"));
	return (serviced);
}
371 
/* ARGSUSED */
/* Unmask (enable) the hardware interrupts for this function. */
void
nxge_intr_hw_enable(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr_hw_enable"));
	(void) nxge_intr_mask_mgmt_set(nxgep, B_TRUE);
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr_hw_enable"));
}
380 
/* ARGSUSED */
/* Mask (disable) the hardware interrupts for this function. */
void
nxge_intr_hw_disable(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr_hw_disable"));
	(void) nxge_intr_mask_mgmt_set(nxgep, B_FALSE);
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr_hw_disable"));
}
389 
390 /* ARGSUSED */
391 void
392 nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count)
393 {
394 	p_nxge_t nxgep = (p_nxge_t)arg;
395 	uint8_t channel;
396 	npi_handle_t handle;
397 	p_nxge_ldgv_t ldgvp;
398 	p_nxge_ldv_t ldvp;
399 	int i;
400 
401 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_hw_blank"));
402 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
403 
404 	if ((ldgvp = nxgep->ldgvp) == NULL) {
405 		NXGE_ERROR_MSG((nxgep, INT_CTL,
406 			"<== nxge_rx_hw_blank (not enabled)"));
407 		return;
408 	}
409 	ldvp = nxgep->ldgvp->ldvp;
410 	if (ldvp == NULL) {
411 		return;
412 	}
413 	for (i = 0; i < ldgvp->nldvs; i++, ldvp++) {
414 		if (ldvp->is_rxdma) {
415 			channel = ldvp->channel;
416 			(void) npi_rxdma_cfg_rdc_rcr_threshold(handle,
417 				channel, count);
418 			(void) npi_rxdma_cfg_rdc_rcr_timeout(handle,
419 				channel, ticks);
420 		}
421 	}
422 
423 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_rx_hw_blank"));
424 }
425 
/* ARGSUSED */
/*
 * nxge_hw_stop - stop the data path: disable the TX and RX MACs, then
 * stop all transmit and receive DMA channels.
 */
void
nxge_hw_stop(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_stop"));

	(void) nxge_tx_mac_disable(nxgep);
	(void) nxge_rx_mac_disable(nxgep);
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_stop"));
}
439 
/* ARGSUSED */
/*
 * nxge_hw_ioctl - dispatch driver-private M_IOCTL commands
 * (MII/register access and register-dump diagnostics).  Each case
 * acknowledges the message itself via miocack/miocnak; unknown
 * commands are nak'd with EINVAL.
 */
void
nxge_hw_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	int cmd;

	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_hw_ioctl"));

	if (nxgep == NULL) {
		miocnak(wq, mp, 0, EINVAL);
		return;
	}
	iocp->ioc_error = 0;
	cmd = iocp->ioc_cmd;

	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		return;

	case NXGE_GET_MII:
		/* b_cont carries the register number; reply is 16 bits. */
		nxge_get_mii(nxgep, mp->b_cont);
		miocack(wq, mp, sizeof (uint16_t), 0);
		break;

	case NXGE_PUT_MII:
		nxge_put_mii(nxgep, mp->b_cont);
		miocack(wq, mp, 0, 0);
		break;

	case NXGE_GET64:
		nxge_get64(nxgep, mp->b_cont);
		miocack(wq, mp, sizeof (uint32_t), 0);
		break;

	case NXGE_PUT64:
		nxge_put64(nxgep, mp->b_cont);
		miocack(wq, mp, 0, 0);
		break;

	case NXGE_PUT_TCAM:
		nxge_put_tcam(nxgep, mp->b_cont);
		miocack(wq, mp, 0, 0);
		break;

	case NXGE_GET_TCAM:
		nxge_get_tcam(nxgep, mp->b_cont);
		miocack(wq, mp, 0, 0);
		break;

	case NXGE_TX_REGS_DUMP:
		nxge_txdma_regs_dump_channels(nxgep);
		miocack(wq, mp, 0, 0);
		break;
	case NXGE_RX_REGS_DUMP:
		nxge_rxdma_regs_dump_channels(nxgep);
		miocack(wq, mp, 0, 0);
		break;
	case NXGE_VIR_INT_REGS_DUMP:
	case NXGE_INT_REGS_DUMP:
		nxge_virint_regs_dump(nxgep);
		miocack(wq, mp, 0, 0);
		break;
	case NXGE_RTRACE:
		/* nxge_rtrace_ioctl() acks/naks the message itself. */
		nxge_rtrace_ioctl(nxgep, wq, mp, iocp);
		break;
	}
}
508 
509 /* ARGSUSED */
510 void
511 nxge_loopback_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp,
512 	struct iocblk *iocp)
513 {
514 	p_lb_property_t lb_props;
515 
516 	size_t size;
517 	int i;
518 
519 	if (mp->b_cont == NULL) {
520 		miocnak(wq, mp, 0, EINVAL);
521 	}
522 	switch (iocp->ioc_cmd) {
523 	case LB_GET_MODE:
524 		NXGE_DEBUG_MSG((nxgep, IOC_CTL, "NXGE_GET_LB_MODE command"));
525 		if (nxgep != NULL) {
526 			*(lb_info_sz_t *)mp->b_cont->b_rptr =
527 				nxgep->statsp->port_stats.lb_mode;
528 			miocack(wq, mp, sizeof (nxge_lb_t), 0);
529 		} else
530 			miocnak(wq, mp, 0, EINVAL);
531 		break;
532 	case LB_SET_MODE:
533 		NXGE_DEBUG_MSG((nxgep, IOC_CTL, "NXGE_SET_LB_MODE command"));
534 		if (iocp->ioc_count != sizeof (uint32_t)) {
535 			miocack(wq, mp, 0, 0);
536 			break;
537 		}
538 		if ((nxgep != NULL) && nxge_set_lb(nxgep, wq, mp->b_cont)) {
539 			miocack(wq, mp, 0, 0);
540 		} else {
541 			miocnak(wq, mp, 0, EPROTO);
542 		}
543 		break;
544 	case LB_GET_INFO_SIZE:
545 		NXGE_DEBUG_MSG((nxgep, IOC_CTL, "LB_GET_INFO_SIZE command"));
546 		if (nxgep != NULL) {
547 			size = sizeof (lb_normal);
548 			if (nxgep->statsp->mac_stats.cap_10gfdx) {
549 				size += sizeof (lb_external10g);
550 				size += sizeof (lb_phy10g);
551 				size += sizeof (lb_serdes10g);
552 				size += sizeof (lb_mac10g);
553 			}
554 			if (nxgep->statsp->mac_stats.cap_1000fdx) {
555 				size += sizeof (lb_external1000);
556 				size += sizeof (lb_mac1000);
557 				if (nxgep->mac.portmode == PORT_1G_COPPER)
558 					size += sizeof (lb_phy1000);
559 			}
560 			if (nxgep->statsp->mac_stats.cap_100fdx)
561 				size += sizeof (lb_external100);
562 			if (nxgep->statsp->mac_stats.cap_10fdx)
563 				size += sizeof (lb_external10);
564 			else if (nxgep->mac.portmode == PORT_1G_FIBER)
565 				size += sizeof (lb_serdes1000);
566 			*(lb_info_sz_t *)mp->b_cont->b_rptr = size;
567 
568 			NXGE_DEBUG_MSG((nxgep, IOC_CTL,
569 				"NXGE_GET_LB_INFO command: size %d", size));
570 			miocack(wq, mp, sizeof (lb_info_sz_t), 0);
571 		} else
572 			miocnak(wq, mp, 0, EINVAL);
573 		break;
574 
575 	case LB_GET_INFO:
576 		NXGE_DEBUG_MSG((nxgep, IOC_CTL, "NXGE_GET_LB_INFO command"));
577 		if (nxgep != NULL) {
578 			size = sizeof (lb_normal);
579 			if (nxgep->statsp->mac_stats.cap_10gfdx) {
580 				size += sizeof (lb_external10g);
581 				size += sizeof (lb_phy10g);
582 				size += sizeof (lb_serdes10g);
583 				size += sizeof (lb_mac10g);
584 			}
585 			if (nxgep->statsp->mac_stats.cap_1000fdx) {
586 				size += sizeof (lb_external1000);
587 				size += sizeof (lb_mac1000);
588 				if (nxgep->mac.portmode == PORT_1G_COPPER)
589 					size += sizeof (lb_phy1000);
590 			}
591 			if (nxgep->statsp->mac_stats.cap_100fdx)
592 				size += sizeof (lb_external100);
593 			if (nxgep->statsp->mac_stats.cap_10fdx)
594 				size += sizeof (lb_external10);
595 			else if (nxgep->mac.portmode == PORT_1G_FIBER)
596 				size += sizeof (lb_serdes1000);
597 
598 			NXGE_DEBUG_MSG((nxgep, IOC_CTL,
599 				"NXGE_GET_LB_INFO command: size %d", size));
600 			if (size == iocp->ioc_count) {
601 				i = 0;
602 				lb_props = (p_lb_property_t)mp->b_cont->b_rptr;
603 				lb_props[i++] = lb_normal;
604 				if (nxgep->statsp->mac_stats.cap_10gfdx) {
605 					lb_props[i++] = lb_mac10g;
606 					lb_props[i++] = lb_serdes10g;
607 					lb_props[i++] = lb_phy10g;
608 					lb_props[i++] = lb_external10g;
609 				}
610 				if (nxgep->statsp->mac_stats.cap_1000fdx)
611 					lb_props[i++] = lb_external1000;
612 				if (nxgep->statsp->mac_stats.cap_100fdx)
613 					lb_props[i++] = lb_external100;
614 				if (nxgep->statsp->mac_stats.cap_10fdx)
615 					lb_props[i++] = lb_external10;
616 				if (nxgep->statsp->mac_stats.cap_1000fdx)
617 					lb_props[i++] = lb_mac1000;
618 				if (nxgep->mac.portmode == PORT_1G_COPPER) {
619 					if (nxgep->statsp->mac_stats.
620 						cap_1000fdx)
621 						lb_props[i++] = lb_phy1000;
622 				} else if (nxgep->mac.portmode ==
623 					PORT_1G_FIBER)
624 					lb_props[i++] = lb_serdes1000;
625 				miocack(wq, mp, size, 0);
626 			} else
627 				miocnak(wq, mp, 0, EINVAL);
628 		} else {
629 			miocnak(wq, mp, 0, EINVAL);
630 			cmn_err(CE_NOTE, "!nxge_hw_ioctl: invalid command 0x%x",
631 				iocp->ioc_cmd);
632 		}
633 		break;
634 	}
635 }
636 
637 /*
638  * DMA channel interfaces to access various channel specific
639  * hardware functions.
640  */
641 /* ARGSUSED */
642 void
643 nxge_rxdma_channel_put64(nxge_os_acc_handle_t handle, void *reg_addrp,
644 	uint32_t reg_base, uint16_t channel, uint64_t reg_data)
645 {
646 	uint64_t reg_offset;
647 
648 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_channel_put64"));
649 
650 	/*
651 	 * Channel is assumed to be from 0 to the maximum DMA channel #. If we
652 	 * use the virtual DMA CSR address space from the config space (in PCI
653 	 * case), then the following code need to be use different offset
654 	 * computation macro.
655 	 */
656 	reg_offset = reg_base + DMC_OFFSET(channel);
657 	NXGE_PIO_WRITE64(handle, reg_addrp, reg_offset, reg_data);
658 
659 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_channel_put64"));
660 }
661 
662 /* ARGSUSED */
663 uint64_t
664 nxge_rxdma_channel_get64(nxge_os_acc_handle_t handle, void *reg_addrp,
665 	uint32_t reg_base, uint16_t channel)
666 {
667 	uint64_t reg_offset;
668 
669 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_channel_get64"));
670 
671 	/*
672 	 * Channel is assumed to be from 0 to the maximum DMA channel #. If we
673 	 * use the virtual DMA CSR address space from the config space (in PCI
674 	 * case), then the following code need to be use different offset
675 	 * computation macro.
676 	 */
677 	reg_offset = reg_base + DMC_OFFSET(channel);
678 
679 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_channel_get64"));
680 
681 	return (NXGE_PIO_READ64(handle, reg_addrp, reg_offset));
682 }
683 
/* ARGSUSED */
/*
 * nxge_get32 - ioctl helper: read a 32-bit device register.  The mblk
 * carries the register offset on input and is overwritten in place
 * with the value read.
 */
void
nxge_get32(p_nxge_t nxgep, p_mblk_t mp)
{
	nxge_os_acc_handle_t nxge_regh;

	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_get32"));
	nxge_regh = nxgep->dev_regs->nxge_regh;

	*(uint32_t *)mp->b_rptr = NXGE_PIO_READ32(nxge_regh,
		nxgep->dev_regs->nxge_regp, *(uint32_t *)mp->b_rptr);

	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "value = 0x%08X",
		*(uint32_t *)mp->b_rptr));
	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_get32"));
}
700 
/* ARGSUSED */
/*
 * nxge_put32 - ioctl helper: write a 32-bit device register.  The mblk
 * carries two uint32s: buf[0] is the register offset, buf[1] the value
 * to write.
 */
void
nxge_put32(p_nxge_t nxgep, p_mblk_t mp)
{
	nxge_os_acc_handle_t nxge_regh;
	uint32_t *buf;
	uint8_t *reg;

	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_put32"));
	nxge_regh = nxgep->dev_regs->nxge_regh;

	buf = (uint32_t *)mp->b_rptr;
	reg = (uint8_t *)(nxgep->dev_regs->nxge_regp) + buf[0];
	NXGE_DEBUG_MSG((nxgep, IOC_CTL,
		"reg = 0x%016llX index = 0x%08X value = 0x%08X",
		reg, buf[0], buf[1]));
	NXGE_PIO_WRITE32(nxge_regh, (uint32_t *)reg, 0, buf[1]);
	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_put32"));
}
720 
721 /*ARGSUSED*/
722 boolean_t
723 nxge_set_lb(p_nxge_t nxgep, queue_t *wq, p_mblk_t mp)
724 {
725 	boolean_t status = B_TRUE;
726 	uint32_t lb_mode;
727 	lb_property_t *lb_info;
728 
729 	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_set_lb"));
730 	lb_mode = nxgep->statsp->port_stats.lb_mode;
731 	if (lb_mode == *(uint32_t *)mp->b_rptr) {
732 		cmn_err(CE_NOTE,
733 			"!nxge%d: Loopback mode already set (lb_mode %d).\n",
734 			nxgep->instance, lb_mode);
735 		status = B_FALSE;
736 		goto nxge_set_lb_exit;
737 	}
738 	lb_mode = *(uint32_t *)mp->b_rptr;
739 	lb_info = NULL;
740 	if (lb_mode == lb_normal.value)
741 		lb_info = &lb_normal;
742 	else if ((lb_mode == lb_external10g.value) &&
743 		(nxgep->statsp->mac_stats.cap_10gfdx))
744 		lb_info = &lb_external10g;
745 	else if ((lb_mode == lb_external1000.value) &&
746 		(nxgep->statsp->mac_stats.cap_1000fdx))
747 		lb_info = &lb_external1000;
748 	else if ((lb_mode == lb_external100.value) &&
749 		(nxgep->statsp->mac_stats.cap_100fdx))
750 		lb_info = &lb_external100;
751 	else if ((lb_mode == lb_external10.value) &&
752 		(nxgep->statsp->mac_stats.cap_10fdx))
753 		lb_info = &lb_external10;
754 	else if ((lb_mode == lb_phy10g.value) &&
755 			((nxgep->mac.portmode == PORT_10G_COPPER) ||
756 			(nxgep->mac.portmode == PORT_10G_FIBER)))
757 		lb_info = &lb_phy10g;
758 	else if ((lb_mode == lb_phy1000.value) &&
759 		(nxgep->mac.portmode == PORT_1G_COPPER))
760 		lb_info = &lb_phy1000;
761 	else if ((lb_mode == lb_phy.value) &&
762 		(nxgep->mac.portmode == PORT_1G_COPPER))
763 		lb_info = &lb_phy;
764 	else if ((lb_mode == lb_serdes10g.value) &&
765 			(nxgep->mac.portmode == PORT_10G_FIBER) ||
766 		(nxgep->mac.portmode == PORT_10G_COPPER))
767 		lb_info = &lb_serdes10g;
768 	else if ((lb_mode == lb_serdes1000.value) &&
769 		(nxgep->mac.portmode == PORT_1G_FIBER))
770 		lb_info = &lb_serdes1000;
771 	else if (lb_mode == lb_mac10g.value)
772 		lb_info = &lb_mac10g;
773 	else if (lb_mode == lb_mac1000.value)
774 		lb_info = &lb_mac1000;
775 	else if (lb_mode == lb_mac.value)
776 		lb_info = &lb_mac;
777 	else {
778 		cmn_err(CE_NOTE,
779 			"!nxge%d: Loopback mode not supported(mode %d).\n",
780 			nxgep->instance, lb_mode);
781 		status = B_FALSE;
782 		goto nxge_set_lb_exit;
783 	}
784 
785 	if (lb_mode == nxge_lb_normal) {
786 		if (nxge_lb_dbg) {
787 			cmn_err(CE_NOTE,
788 				"!nxge%d: Returning to normal operation",
789 				nxgep->instance);
790 		}
791 		nxge_set_lb_normal(nxgep);
792 		goto nxge_set_lb_exit;
793 	}
794 	nxgep->statsp->port_stats.lb_mode = lb_mode;
795 
796 	if (nxge_lb_dbg)
797 		cmn_err(CE_NOTE,
798 			"!nxge%d: Adapter now in %s loopback mode",
799 			nxgep->instance, lb_info->key);
800 	nxgep->param_arr[param_autoneg].value = 0;
801 	nxgep->param_arr[param_anar_10gfdx].value =
802 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10g) ||
803 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_mac10g) ||
804 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_phy10g) ||
805 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_serdes10g);
806 	nxgep->param_arr[param_anar_10ghdx].value = 0;
807 	nxgep->param_arr[param_anar_1000fdx].value =
808 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_ext1000) ||
809 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_mac1000) ||
810 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_phy1000) ||
811 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_serdes1000);
812 	nxgep->param_arr[param_anar_1000hdx].value = 0;
813 	nxgep->param_arr[param_anar_100fdx].value =
814 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_phy) ||
815 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_mac) ||
816 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_ext100);
817 	nxgep->param_arr[param_anar_100hdx].value = 0;
818 	nxgep->param_arr[param_anar_10fdx].value =
819 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_mac) ||
820 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10);
821 	if (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext1000) {
822 		nxgep->param_arr[param_master_cfg_enable].value = 1;
823 		nxgep->param_arr[param_master_cfg_value].value = 1;
824 	}
825 	if ((nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10g) ||
826 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_ext1000) ||
827 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_ext100) ||
828 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10) ||
829 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_phy10g) ||
830 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_phy1000) ||
831 		(nxgep->statsp->port_stats.lb_mode == nxge_lb_phy)) {
832 
833 		(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
834 		(void) nxge_xcvr_find(nxgep);
835 		(void) nxge_link_init(nxgep);
836 		(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
837 	}
838 	if (lb_info->lb_type == internal) {
839 		if ((nxgep->statsp->port_stats.lb_mode == nxge_lb_mac10g) ||
840 				(nxgep->statsp->port_stats.lb_mode ==
841 				nxge_lb_phy10g) ||
842 				(nxgep->statsp->port_stats.lb_mode ==
843 				nxge_lb_serdes10g)) {
844 			nxgep->statsp->mac_stats.link_speed = 10000;
845 		} else if ((nxgep->statsp->port_stats.lb_mode
846 				== nxge_lb_mac1000) ||
847 				(nxgep->statsp->port_stats.lb_mode ==
848 				nxge_lb_phy1000) ||
849 				(nxgep->statsp->port_stats.lb_mode ==
850 				nxge_lb_serdes1000)) {
851 			nxgep->statsp->mac_stats.link_speed = 1000;
852 		} else {
853 			nxgep->statsp->mac_stats.link_speed = 100;
854 		}
855 		nxgep->statsp->mac_stats.link_duplex = 2;
856 		nxgep->statsp->mac_stats.link_up = 1;
857 	}
858 	nxge_global_reset(nxgep);
859 
860 nxge_set_lb_exit:
861 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
862 		"<== nxge_set_lb status = 0x%08x", status));
863 	return (status);
864 }
865 
/* ARGSUSED */
/*
 * nxge_set_lb_normal - leave loopback and return to normal operation.
 *
 * Restores all saved link parameters (old_value), performs a global
 * reset, and re-runs the full link bring-up sequence.
 */
void
nxge_set_lb_normal(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_lb_normal"));
	nxgep->statsp->port_stats.lb_mode = nxge_lb_normal;
	nxgep->param_arr[param_autoneg].value =
		nxgep->param_arr[param_autoneg].old_value;
	nxgep->param_arr[param_anar_1000fdx].value =
		nxgep->param_arr[param_anar_1000fdx].old_value;
	nxgep->param_arr[param_anar_1000hdx].value =
		nxgep->param_arr[param_anar_1000hdx].old_value;
	nxgep->param_arr[param_anar_100fdx].value =
		nxgep->param_arr[param_anar_100fdx].old_value;
	nxgep->param_arr[param_anar_100hdx].value =
		nxgep->param_arr[param_anar_100hdx].old_value;
	nxgep->param_arr[param_anar_10fdx].value =
		nxgep->param_arr[param_anar_10fdx].old_value;
	nxgep->param_arr[param_master_cfg_enable].value =
		nxgep->param_arr[param_master_cfg_enable].old_value;
	nxgep->param_arr[param_master_cfg_value].value =
		nxgep->param_arr[param_master_cfg_value].old_value;

	nxge_global_reset(nxgep);

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	(void) nxge_xcvr_find(nxgep);
	(void) nxge_link_init(nxgep);
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_lb_normal"));
}
898 
/* ARGSUSED */
/*
 * nxge_get_mii - ioctl helper: read an MII transceiver register.  The
 * mblk carries the 16-bit register number on input and is overwritten
 * in place with the value read.
 */
void
nxge_get_mii(p_nxge_t nxgep, p_mblk_t mp)
{
	uint16_t reg;

	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_get_mii"));

	reg = *(uint16_t *)mp->b_rptr;
	(void) nxge_mii_read(nxgep, nxgep->statsp->mac_stats.xcvr_portn, reg,
		(uint16_t *)mp->b_rptr);
	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "reg = 0x%08X value = 0x%04X",
		reg, *(uint16_t *)mp->b_rptr));
	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_get_mii"));
}
914 
/* ARGSUSED */
/*
 * nxge_put_mii - ioctl helper: write an MII transceiver register.  The
 * mblk carries two uint16s: buf[0] is the register number (truncated
 * to 8 bits), buf[1] the value to write.
 */
void
nxge_put_mii(p_nxge_t nxgep, p_mblk_t mp)
{
	uint16_t *buf;
	uint8_t reg;

	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_put_mii"));
	buf = (uint16_t *)mp->b_rptr;
	reg = (uint8_t)buf[0];
	NXGE_DEBUG_MSG((nxgep, IOC_CTL,
		"reg = 0x%08X index = 0x%08X value = 0x%08X",
		reg, buf[0], buf[1]));
	(void) nxge_mii_write(nxgep, nxgep->statsp->mac_stats.xcvr_portn,
		reg, buf[1]);
	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_put_mii"));
}
932 
/* ARGSUSED */
/*
 * nxge_check_hw_state - periodic (timer-driven) hardware health check.
 *
 * Runs under genlock: checks for transmit hangs, validates the register
 * access handle via FMA, polls system errors through nxge_syserr_intr()
 * when the syserr logical device is timer-based, and reschedules itself
 * with nxge_start_timer().  Exits early if the hardware is not
 * initialized or the logical-device tables are not ready.
 */
void
nxge_check_hw_state(p_nxge_t nxgep)
{
	p_nxge_ldgv_t ldgvp;
	p_nxge_ldv_t t_ldvp;

	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "==> nxge_check_hw_state"));

	MUTEX_ENTER(nxgep->genlock);
	/* Clear the timer id; it is re-set below only if we reschedule. */
	nxgep->nxge_timerid = 0;
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		goto nxge_check_hw_state_exit;
	}
	nxge_check_tx_hang(nxgep);

	ldgvp = nxgep->ldgvp;
	if (ldgvp == NULL || (ldgvp->ldvp_syserr == NULL)) {
		NXGE_ERROR_MSG((nxgep, SYSERR_CTL, "<== nxge_check_hw_state: "
				"NULL ldgvp (interrupt not ready)."));
		goto nxge_check_hw_state_exit;
	}
	t_ldvp = ldgvp->ldvp_syserr;
	/* Only poll when the syserr device is timer-based, not vectored. */
	if (!t_ldvp->use_timer) {
		NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "<== nxge_check_hw_state: "
				"ldgvp $%p t_ldvp $%p use_timer flag %d",
				ldgvp, t_ldvp, t_ldvp->use_timer));
		goto nxge_check_hw_state_exit;
	}
	if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"port%d Bad register acc handle", nxgep->mac.portnum));
	}
	(void) nxge_syserr_intr((void *) t_ldvp, (void *) nxgep);

	nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state,
		NXGE_CHECK_TIMER);

nxge_check_hw_state_exit:
	MUTEX_EXIT(nxgep->genlock);
	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "<== nxge_check_hw_state"));
}
975 
976 /*ARGSUSED*/
977 static void
978 nxge_rtrace_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp,
979 	struct iocblk *iocp)
980 {
981 	ssize_t size;
982 	rtrace_t *rtp;
983 	mblk_t *nmp;
984 	uint32_t i, j;
985 	uint32_t start_blk;
986 	uint32_t base_entry;
987 	uint32_t num_entries;
988 
989 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_rtrace_ioctl"));
990 
991 	size = 1024;
992 	if (mp->b_cont == NULL || MBLKL(mp->b_cont) < size) {
993 		NXGE_DEBUG_MSG((nxgep, STR_CTL,
994 				"malformed M_IOCTL MBLKL = %d size = %d",
995 				MBLKL(mp->b_cont), size));
996 		miocnak(wq, mp, 0, EINVAL);
997 		return;
998 	}
999 	nmp = mp->b_cont;
1000 	rtp = (rtrace_t *)nmp->b_rptr;
1001 	start_blk = rtp->next_idx;
1002 	num_entries = rtp->last_idx;
1003 	base_entry = start_blk * MAX_RTRACE_IOC_ENTRIES;
1004 
1005 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "start_blk = %d\n", start_blk));
1006 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "num_entries = %d\n", num_entries));
1007 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "base_entry = %d\n", base_entry));
1008 
1009 	rtp->next_idx = npi_rtracebuf.next_idx;
1010 	rtp->last_idx = npi_rtracebuf.last_idx;
1011 	rtp->wrapped = npi_rtracebuf.wrapped;
1012 	for (i = 0, j = base_entry; i < num_entries; i++, j++) {
1013 		rtp->buf[i].ctl_addr = npi_rtracebuf.buf[j].ctl_addr;
1014 		rtp->buf[i].val_l32 = npi_rtracebuf.buf[j].val_l32;
1015 		rtp->buf[i].val_h32 = npi_rtracebuf.buf[j].val_h32;
1016 	}
1017 
1018 	nmp->b_wptr = nmp->b_rptr + size;
1019 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_rtrace_ioctl"));
1020 	miocack(wq, mp, (int)size, 0);
1021 }
1022