xref: /illumos-gate/usr/src/uts/common/io/nxge/nxge_fzc.c (revision 86ef0a63e1cfa5dc98606efef379365acca98063)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include	<nxge_impl.h>
28 #include	<npi_mac.h>
29 #include	<npi_rxdma.h>
30 #include	<nxge_hio.h>
31 
32 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
33 static int	nxge_herr2kerr(uint64_t);
34 static uint64_t nxge_init_hv_fzc_lp_op(p_nxge_t, uint64_t,
35     uint64_t, uint64_t, uint64_t, uint64_t);
36 #endif
37 
38 static nxge_status_t nxge_init_fzc_rdc_pages(p_nxge_t,
39     uint16_t, dma_log_page_t *, dma_log_page_t *);
40 
41 static nxge_status_t nxge_init_fzc_tdc_pages(p_nxge_t,
42     uint16_t, dma_log_page_t *, dma_log_page_t *);
43 
44 /*
45  * The following interfaces are controlled by the
46  * function control registers. Some global registers
47  * are to be initialized by only one of the 2/4 functions.
48  * Use the test and set register.
49  */
50 /*ARGSUSED*/
51 nxge_status_t
52 nxge_test_and_set(p_nxge_t nxgep, uint8_t tas)
53 {
54 	npi_handle_t		handle;
55 	npi_status_t		rs = NPI_SUCCESS;
56 
57 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
58 	if ((rs = npi_dev_func_sr_sr_get_set_clear(handle, tas))
59 	    != NPI_SUCCESS) {
60 		return (NXGE_ERROR | rs);
61 	}
62 
63 	return (NXGE_OK);
64 }
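/*
 * Hypothetical usage sketch (an assumption, not code from this driver):
 * a function wanting to perform one-time initialization of a shared FZC
 * register would first arbitrate through the test-and-set register, e.g.
 *
 *	if (nxge_test_and_set(nxgep, tas_bit) != NXGE_OK)
 *		return;
 *	(initialize the shared registers)
 *
 * where `tas_bit' is an assumed bit selector; the actual ownership
 * semantics are those of npi_dev_func_sr_sr_get_set_clear() and the
 * DEV_FUNC_SR register.
 */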
65 
66 nxge_status_t
67 nxge_set_fzc_multi_part_ctl(p_nxge_t nxgep, boolean_t mpc)
68 {
69 	npi_handle_t		handle;
70 	npi_status_t		rs = NPI_SUCCESS;
71 
72 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_set_fzc_multi_part_ctl"));
73 
74 	/*
75 	 * In multi-partitioning, the partition manager
76 	 * who owns function zero should set this multi-partition
77 	 * control bit.
78 	 */
79 	if (nxgep->use_partition && nxgep->function_num) {
80 		return (NXGE_ERROR);
81 	}
82 
83 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
84 	if ((rs = npi_fzc_mpc_set(handle, mpc)) != NPI_SUCCESS) {
85 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
86 		    "<== nxge_set_fzc_multi_part_ctl"));
87 		return (NXGE_ERROR | rs);
88 	}
89 
90 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_set_fzc_multi_part_ctl"));
91 
92 	return (NXGE_OK);
93 }
94 
95 nxge_status_t
96 nxge_get_fzc_multi_part_ctl(p_nxge_t nxgep, boolean_t *mpc_p)
97 {
98 	npi_handle_t		handle;
99 	npi_status_t		rs = NPI_SUCCESS;
100 
101 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_get_fzc_multi_part_ctl"));
102 
103 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
104 	if ((rs = npi_fzc_mpc_get(handle, mpc_p)) != NPI_SUCCESS) {
105 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
106 		    "<== nxge_get_fzc_multi_part_ctl"));
107 		return (NXGE_ERROR | rs);
108 	}
109 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_get_fzc_multi_part_ctl"));
110 
111 	return (NXGE_OK);
112 }
113 
114 /*
115  * System interrupt registers that are under function zero
116  * management.
117  */
118 nxge_status_t
119 nxge_fzc_intr_init(p_nxge_t nxgep)
120 {
121 	nxge_status_t	status = NXGE_OK;
122 
123 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_init"));
124 
125 	/* Configure the initial timer resolution */
126 	if ((status = nxge_fzc_intr_tmres_set(nxgep)) != NXGE_OK) {
127 		return (status);
128 	}
129 
130 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
131 		/*
132 		 * Set up the logical device group's logical devices that
133 		 * the group owns.
134 		 */
135 		if ((status = nxge_fzc_intr_ldg_num_set(nxgep)) != NXGE_OK)
136 			goto fzc_intr_init_exit;
137 
138 		/* Configure the system interrupt data */
139 		if ((status = nxge_fzc_intr_sid_set(nxgep)) != NXGE_OK)
140 			goto fzc_intr_init_exit;
141 	}
142 
143 fzc_intr_init_exit:
144 
145 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_init"));
146 
147 	return (status);
148 }
149 
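/*
 * nxge_fzc_intr_ldg_num_set
 *
 *	Walk each logical device group and program the FZC logical device
 *	group number register for every logical device that the group owns.
 */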
150 nxge_status_t
151 nxge_fzc_intr_ldg_num_set(p_nxge_t nxgep)
152 {
153 	p_nxge_ldg_t	ldgp;
154 	p_nxge_ldv_t	ldvp;
155 	npi_handle_t	handle;
156 	int		i, j;
157 	npi_status_t	rs = NPI_SUCCESS;
158 
159 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_ldg_num_set"));
160 
161 	if (nxgep->ldgvp == NULL) {
162 		return (NXGE_ERROR);
163 	}
164 
165 	ldgp = nxgep->ldgvp->ldgp;
166 	ldvp = nxgep->ldgvp->ldvp;
167 	if (ldgp == NULL || ldvp == NULL) {
168 		return (NXGE_ERROR);
169 	}
170 
171 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
172 
173 	for (i = 0; i < nxgep->ldgvp->ldg_intrs; i++, ldgp++) {
174 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
175 		    "==> nxge_fzc_intr_ldg_num_set "
176 		    "(Neptune): # ldv %d "
177 		    "in group %d", ldgp->nldvs, ldgp->ldg));
178 
179 		for (j = 0; j < ldgp->nldvs; j++, ldvp++) {
180 			rs = npi_fzc_ldg_num_set(handle, ldvp->ldv,
181 			    ldvp->ldg_assigned);
182 			if (rs != NPI_SUCCESS) {
183 				NXGE_DEBUG_MSG((nxgep, INT_CTL,
184 				    "<== nxge_fzc_intr_ldg_num_set failed "
185 				    " rs 0x%x ldv %d ldg %d",
186 				    rs, ldvp->ldv, ldvp->ldg_assigned));
187 				return (NXGE_ERROR | rs);
188 			}
189 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
190 			    "<== nxge_fzc_intr_ldg_num_set OK "
191 			    " ldv %d ldg %d",
192 			    ldvp->ldv, ldvp->ldg_assigned));
193 		}
194 	}
195 
196 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_ldg_num_set"));
197 
198 	return (NXGE_OK);
199 }
200 
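/*
 * nxge_fzc_intr_tmres_set
 *
 *	Program the global logical device group timer resolution, which
 *	scales the per-group interrupt timers.
 */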
201 nxge_status_t
202 nxge_fzc_intr_tmres_set(p_nxge_t nxgep)
203 {
204 	npi_handle_t	handle;
205 	npi_status_t	rs = NPI_SUCCESS;
206 
207 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_tmres_set"));
208 	if (nxgep->ldgvp == NULL) {
209 		return (NXGE_ERROR);
210 	}
211 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
212 	if ((rs = npi_fzc_ldg_timer_res_set(handle, nxgep->ldgvp->tmres))) {
213 		return (NXGE_ERROR | rs);
214 	}
215 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_tmres_set"));
216 
217 	return (NXGE_OK);
218 }
219 
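/*
 * nxge_fzc_intr_sid_set
 *
 *	For each logical device group, program the System Interrupt Data
 *	(SID) register with the owning function and interrupt vector.
 */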
220 nxge_status_t
221 nxge_fzc_intr_sid_set(p_nxge_t nxgep)
222 {
223 	npi_handle_t	handle;
224 	p_nxge_ldg_t	ldgp;
225 	fzc_sid_t	sid;
226 	int		i;
227 	npi_status_t	rs = NPI_SUCCESS;
228 
229 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_sid_set"));
230 	if (nxgep->ldgvp == NULL) {
231 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
232 		    "<== nxge_fzc_intr_sid_set: no ldg"));
233 		return (NXGE_ERROR);
234 	}
235 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
236 	ldgp = nxgep->ldgvp->ldgp;
237 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
238 	    "==> nxge_fzc_intr_sid_set: #int %d", nxgep->ldgvp->ldg_intrs));
239 	for (i = 0; i < nxgep->ldgvp->ldg_intrs; i++, ldgp++) {
240 		sid.ldg = ldgp->ldg;
241 		sid.niu = B_FALSE;
242 		sid.func = ldgp->func;
243 		sid.vector = ldgp->vector;
244 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
245 		    "==> nxge_fzc_intr_sid_set(%d): func %d group %d "
246 		    "vector %d",
247 		    i, sid.func, sid.ldg, sid.vector));
248 		rs = npi_fzc_sid_set(handle, sid);
249 		if (rs != NPI_SUCCESS) {
250 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
251 			    "<== nxge_fzc_intr_sid_set:failed 0x%x",
252 			    rs));
253 			return (NXGE_ERROR | rs);
254 		}
255 	}
256 
257 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_sid_set"));
258 
259 	return (NXGE_OK);
260 
261 }
262 
263 /*
264  * nxge_init_fzc_rdc
265  *
266  *	Initialize all of a RDC's FZC_DMC registers.
267  *	This is executed by the service domain, on behalf of a
268  *	guest domain, who cannot access these registers.
269  *
270  * Arguments:
271  *	nxgep
272  *	channel		The channel to initialize.
273  *
274  * NPI_NXGE function calls:
275  *	nxge_init_fzc_rdc_pages()
276  *
277  * Context:
278  *	Service Domain
279  */
280 /*ARGSUSED*/
281 nxge_status_t
282 nxge_init_fzc_rdc(p_nxge_t nxgep, uint16_t channel)
283 {
284 	nxge_status_t	status = NXGE_OK;
285 
286 	dma_log_page_t	page1, page2;
287 	npi_handle_t	handle;
288 	rdc_red_para_t	red;
289 
290 	/*
291 	 * Initialize the RxDMA channel-specific FZC control
292 	 * registers.
293 	 */
294 
295 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rdc"));
296 
297 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
298 
299 	/* Reset RXDMA channel */
300 	status = npi_rxdma_cfg_rdc_reset(handle, channel);
301 	if (status != NPI_SUCCESS) {
302 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
303 		    "==> nxge_init_fzc_rdc: npi_rxdma_cfg_rdc_reset(%d) "
304 		    "returned 0x%08x", channel, status));
305 		return (NXGE_ERROR | status);
306 	}
307 
308 	/*
309 	 * These values have been copied from
310 	 * nxge_txdma.c:nxge_map_txdma_channel_cfg_ring().
311 	 */
312 	page1.page_num = 0;
313 	page1.valid = 1;
314 	page1.func_num = nxgep->function_num;
315 	page1.mask = 0;
316 	page1.value = 0;
317 	page1.reloc = 0;
318 
319 	page2.page_num = 1;
320 	page2.valid = 1;
321 	page2.func_num = nxgep->function_num;
322 	page2.mask = 0;
323 	page2.value = 0;
324 	page2.reloc = 0;
325 
326 	if (nxgep->niu_type == N2_NIU) {
327 #if !defined(NIU_HV_WORKAROUND)
328 		status = NXGE_OK;
329 #else
330 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
331 		    "==> nxge_init_fzc_rxdma_channel: N2_NIU - NEED to "
332 		    "set up logical pages"));
333 		/* Initialize the RXDMA logical pages */
334 		status = nxge_init_fzc_rdc_pages(nxgep, channel,
335 		    &page1, &page2);
336 		if (status != NXGE_OK) {
337 			return (status);
338 		}
339 #endif
340 	} else if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
341 		/* Initialize the RXDMA logical pages */
342 		status = nxge_init_fzc_rdc_pages(nxgep, channel,
343 		    &page1, &page2);
344 		if (status != NXGE_OK) {
345 			return (status);
346 		}
347 	} else {
348 		return (NXGE_ERROR);
349 	}
350 
351 	/*
352 	 * Configure RED parameters
353 	 */
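	/*
	 * The drop threshold is set RXDMA_RED_LESS_ENTRIES below the RCR
	 * size, so random early discard only begins once the completion
	 * ring is nearly full; win/win_syn select the RED sampling window
	 * (RXDMA_RED_WINDOW_DEFAULT).
	 */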
354 	red.value = 0;
355 	red.bits.ldw.win = RXDMA_RED_WINDOW_DEFAULT;
356 	red.bits.ldw.thre =
357 	    (nxgep->nxge_port_rcr_size - RXDMA_RED_LESS_ENTRIES);
358 	red.bits.ldw.win_syn = RXDMA_RED_WINDOW_DEFAULT;
359 	red.bits.ldw.thre_sync =
360 	    (nxgep->nxge_port_rcr_size - RXDMA_RED_LESS_ENTRIES);
361 
362 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
363 	    "==> nxge_init_fzc_rxdma_channel_red(thre_sync %d(%x))",
364 	    red.bits.ldw.thre_sync,
365 	    red.bits.ldw.thre_sync));
366 
367 	status |= npi_rxdma_cfg_wred_param(handle, channel, &red);
368 
369 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_init_fzc_rdc"));
370 
371 	return (status);
372 }
373 
374 /*
375  * nxge_init_fzc_rxdma_channel
376  *
377  *	Initialize all per-channel FZC_DMC registers.
378  *
379  * Arguments:
380  *	nxgep
381  *	channel		The channel to start
382  *
383  * NPI_NXGE function calls:
384  *	nxge_init_hv_fzc_rxdma_channel_pages()
385  *	nxge_init_fzc_rxdma_channel_pages()
386  *	nxge_init_fzc_rxdma_channel_red()
387  *
388  * Context:
389  *	Service Domain
390  */
391 /*ARGSUSED*/
392 nxge_status_t
393 nxge_init_fzc_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
394 {
395 	rx_rbr_ring_t		*rbr_ring;
396 	rx_rcr_ring_t		*rcr_ring;
397 
398 	nxge_status_t		status = NXGE_OK;
399 
400 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_init_fzc_rxdma_channel"));
401 
402 	rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel];
403 	rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel];
404 
405 	if (nxgep->niu_type == N2_NIU) {
406 #ifndef	NIU_HV_WORKAROUND
407 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
408 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
409 		    "==> nxge_init_fzc_rxdma_channel: N2_NIU - call HV "
410 		    "set up logical pages"));
411 		/* Initialize the RXDMA logical pages */
412 		status = nxge_init_hv_fzc_rxdma_channel_pages(nxgep, channel,
413 		    rbr_ring);
414 		if (status != NXGE_OK) {
415 			return (status);
416 		}
417 #endif
418 		status = NXGE_OK;
419 #else
420 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
421 		    "==> nxge_init_fzc_rxdma_channel: N2_NIU - NEED to "
422 		    "set up logical pages"));
423 		/* Initialize the RXDMA logical pages */
424 		status = nxge_init_fzc_rxdma_channel_pages(nxgep, channel,
425 		    rbr_ring);
426 		if (status != NXGE_OK) {
427 			return (status);
428 		}
429 #endif
430 	} else if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
431 		/* Initialize the RXDMA logical pages */
432 		status = nxge_init_fzc_rxdma_channel_pages(nxgep,
433 		    channel, rbr_ring);
434 		if (status != NXGE_OK) {
435 			return (status);
436 		}
437 	} else {
438 		return (NXGE_ERROR);
439 	}
440 
441 	/* Configure RED parameters */
442 	status = nxge_init_fzc_rxdma_channel_red(nxgep, channel, rcr_ring);
443 
444 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_init_fzc_rxdma_channel"));
445 	return (status);
446 }
447 
448 /*
449  * nxge_init_fzc_rdc_pages
450  *
451  *	Configure an RDC's logical pages.
452  *
453  *	This function is executed by the service domain, on behalf of
454  *	a guest domain, to whom this RDC has been loaned.
455  *
456  * Arguments:
457  *	nxgep
458  *	channel		The channel to initialize.
459  *	page0		Logical page 0 definition.
460  *	page1		Logical page 1 definition.
461  *
462  * Notes:
463  *	I think that this function can be called from any
464  *	domain, but I need to check.
465  *
466  * NPI/NXGE function calls:
467  *	npi_rxdma_cfg_logical_page()
468  *	npi_rxdma_cfg_logical_page_handle()
469  *
470  * Context:
471  *	Any domain
472  */
473 nxge_status_t
474 nxge_init_fzc_rdc_pages(
475 	p_nxge_t nxgep,
476 	uint16_t channel,
477 	dma_log_page_t *page0,
478 	dma_log_page_t *page1)
479 {
480 	npi_handle_t handle;
481 	npi_status_t rs;
482 
483 	uint64_t page_handle;
484 
485 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
486 	    "==> nxge_init_fzc_rdc_pages"));
487 
488 #ifndef	NIU_HV_WORKAROUND
489 	if (nxgep->niu_type == N2_NIU) {
490 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
491 		    "<== nxge_init_fzc_rdc_pages: "
492 		    "N2_NIU: no need to set rxdma logical pages"));
493 		return (NXGE_OK);
494 	}
495 #else
496 	if (nxgep->niu_type == N2_NIU) {
497 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
498 		    "<== nxge_init_fzc_rdc_pages: "
499 		    "N2_NIU: NEED to set rxdma logical pages"));
500 	}
501 #endif
502 
503 	/*
504 	 * Initialize logical page 1.
505 	 */
506 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
507 	if ((rs = npi_rxdma_cfg_logical_page(handle, channel, page0))
508 	    != NPI_SUCCESS)
509 		return (NXGE_ERROR | rs);
510 
511 	/*
512 	 * Initialize logical page 2.
513 	 */
514 	if ((rs = npi_rxdma_cfg_logical_page(handle, channel, page1))
515 	    != NPI_SUCCESS)
516 		return (NXGE_ERROR | rs);
517 
518 	/*
519 	 * Initialize the page handle.
520 	 * (In the current driver, this is always set to 0.)
521 	 */
522 	page_handle = 0;
523 	rs = npi_rxdma_cfg_logical_page_handle(handle, channel, page_handle);
524 	if (rs == NPI_SUCCESS) {
525 		return (NXGE_OK);
526 	} else {
527 		return (NXGE_ERROR | rs);
528 	}
529 }
530 
531 /*ARGSUSED*/
532 nxge_status_t
533 nxge_init_fzc_rxdma_channel_pages(p_nxge_t nxgep,
534     uint16_t channel, p_rx_rbr_ring_t rbrp)
535 {
536 	npi_handle_t		handle;
537 	dma_log_page_t		cfg;
538 	npi_status_t		rs = NPI_SUCCESS;
539 
540 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
541 	    "==> nxge_init_fzc_rxdma_channel_pages"));
542 
543 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
544 	/*
545 	 * Initialize logical page 1.
546 	 */
547 	cfg.func_num = nxgep->function_num;
548 	cfg.page_num = 0;
549 	cfg.valid = rbrp->page_valid.bits.ldw.page0;
550 	cfg.value = rbrp->page_value_1.value;
551 	cfg.mask = rbrp->page_mask_1.value;
552 	cfg.reloc = rbrp->page_reloc_1.value;
553 	rs = npi_rxdma_cfg_logical_page(handle, channel,
554 	    (p_dma_log_page_t)&cfg);
555 	if (rs != NPI_SUCCESS) {
556 		return (NXGE_ERROR | rs);
557 	}
558 
559 	/*
560 	 * Initialize logical page 2.
561 	 */
562 	cfg.page_num = 1;
563 	cfg.valid = rbrp->page_valid.bits.ldw.page1;
564 	cfg.value = rbrp->page_value_2.value;
565 	cfg.mask = rbrp->page_mask_2.value;
566 	cfg.reloc = rbrp->page_reloc_2.value;
567 
568 	rs = npi_rxdma_cfg_logical_page(handle, channel, &cfg);
569 	if (rs != NPI_SUCCESS) {
570 		return (NXGE_ERROR | rs);
571 	}
572 
573 	/* Initialize the page handle */
574 	rs = npi_rxdma_cfg_logical_page_handle(handle, channel,
575 	    rbrp->page_hdl.bits.ldw.handle);
576 
577 	if (rs != NPI_SUCCESS) {
578 		return (NXGE_ERROR | rs);
579 	}
580 
581 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
582 	    "<== nxge_init_fzc_rxdma_channel_pages"));
583 
584 	return (NXGE_OK);
585 }
586 
587 /*ARGSUSED*/
588 nxge_status_t
589 nxge_init_fzc_rxdma_channel_red(p_nxge_t nxgep,
590     uint16_t channel, p_rx_rcr_ring_t rcr_p)
591 {
592 	npi_handle_t		handle;
593 	rdc_red_para_t		red;
594 	npi_status_t		rs = NPI_SUCCESS;
595 
596 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rxdma_channel_red"));
597 
598 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
599 	red.value = 0;
600 	red.bits.ldw.win = RXDMA_RED_WINDOW_DEFAULT;
601 	red.bits.ldw.thre = (rcr_p->comp_size - RXDMA_RED_LESS_ENTRIES);
602 	red.bits.ldw.win_syn = RXDMA_RED_WINDOW_DEFAULT;
603 	red.bits.ldw.thre_sync = (rcr_p->comp_size - RXDMA_RED_LESS_ENTRIES);
604 
605 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
606 	    "==> nxge_init_fzc_rxdma_channel_red(thre_sync %d(%x))",
607 	    red.bits.ldw.thre_sync,
608 	    red.bits.ldw.thre_sync));
609 
610 	rs = npi_rxdma_cfg_wred_param(handle, channel, &red);
611 	if (rs != NPI_SUCCESS) {
612 		return (NXGE_ERROR | rs);
613 	}
614 
615 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
616 	    "<== nxge_init_fzc_rxdma_channel_red"));
617 
618 	return (NXGE_OK);
619 }
620 
621 /*
622  * nxge_init_fzc_tdc
623  *
624  *	Initialize all of a TDC's FZC_DMC registers.
625  *	This is executed by the service domain, on behalf of a
626  *	guest domain, who cannot access these registers.
627  *
628  * Arguments:
629  *	nxgep
630  *	channel		The channel to initialize.
631  *
632  * NPI_NXGE function calls:
633  *	nxge_init_fzc_tdc_pages()
634  *	npi_txc_dma_max_burst_set()
635  *
636  * Registers accessed:
637  *	TXC_DMA_MAX_BURST
638  *
639  * Context:
640  *	Service Domain
641  */
642 /*ARGSUSED*/
643 nxge_status_t
644 nxge_init_fzc_tdc(p_nxge_t nxgep, uint16_t channel)
645 {
646 	nxge_status_t	status = NXGE_OK;
647 
648 	dma_log_page_t	page1, page2;
649 	npi_handle_t	handle;
650 
651 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_tdc"));
652 
653 	/*
654 	 * These values have been copied from
655 	 * nxge_txdma.c:nxge_map_txdma_channel_cfg_ring().
656 	 */
657 	page1.page_num = 0;
658 	page1.valid = 1;
659 	page1.func_num = nxgep->function_num;
660 	page1.mask = 0;
661 	page1.value = 0;
662 	page1.reloc = 0;
663 
664 	page2.page_num = 1;
665 	page2.valid = 1;
666 	page2.func_num = nxgep->function_num;
667 	page2.mask = 0;
668 	page2.value = 0;
669 	page2.reloc = 0;
670 
671 #ifdef	NIU_HV_WORKAROUND
672 	if (nxgep->niu_type == N2_NIU) {
673 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
674 		    "==> nxge_init_fzc_txdma_channel "
675 		    "N2_NIU: NEED to set up txdma logical pages"));
676 		/* Initialize the TXDMA logical pages */
677 		(void) nxge_init_fzc_tdc_pages(nxgep, channel,
678 		    &page1, &page2);
679 	}
680 #endif
681 	if (nxgep->niu_type != N2_NIU) {
682 		if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
683 			/* Initialize the TXDMA logical pages */
684 			(void) nxge_init_fzc_tdc_pages(nxgep, channel,
685 			    &page1, &page2);
686 		} else
687 			return (NXGE_ERROR);
688 	}
689 
690 	/*
691 	 * Configure the TXC DMA Max Burst value.
692 	 *
693 	 * PRM.13.5
694 	 *
695 	 * TXC DMA Max Burst. TXC_DMA_MAX (FZC_TXC + 0000016)
696 	 * 19:0		dma_max_burst		RW
697 	 * Max burst value associated with DMA. Used by DRR engine
698 	 * for computing when DMA has gone into deficit.
699 	 */
700 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
701 	(void) npi_txc_dma_max_burst_set(
702 	    handle, channel, TXC_DMA_MAX_BURST_DEFAULT);
703 
704 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_init_fzc_tdc"));
705 
706 	return (status);
707 }
708 
709 /*ARGSUSED*/
710 nxge_status_t
711 nxge_init_fzc_txdma_channel(p_nxge_t nxgep, uint16_t channel,
712     p_tx_ring_t tx_ring_p, p_tx_mbox_t mbox_p)
713 {
714 	nxge_status_t	status = NXGE_OK;
715 
716 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
717 	    "==> nxge_init_fzc_txdma_channel"));
718 
719 	if (nxgep->niu_type == N2_NIU) {
720 #ifndef	NIU_HV_WORKAROUND
721 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
722 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
723 		    "==> nxge_init_fzc_txdma_channel "
724 		    "N2_NIU: call HV to set up txdma logical pages"));
725 		status = nxge_init_hv_fzc_txdma_channel_pages(nxgep, channel,
726 		    tx_ring_p);
727 		if (status != NXGE_OK) {
728 			return (status);
729 		}
730 #endif
731 		status = NXGE_OK;
732 #else
733 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
734 		    "==> nxge_init_fzc_txdma_channel "
735 		    "N2_NIU: NEED to set up txdma logical pages"));
736 		/* Initialize the TXDMA logical pages */
737 		(void) nxge_init_fzc_txdma_channel_pages(nxgep, channel,
738 		    tx_ring_p);
739 #endif
740 	} else if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
741 		/* Initialize the TXDMA logical pages */
742 		(void) nxge_init_fzc_txdma_channel_pages(nxgep,
743 		    channel, tx_ring_p);
744 	} else {
745 		return (NXGE_ERROR);
746 	}
747 
748 	/*
749 	 * Configure Transmit DRR Weight parameters
750 	 * (It actually programs the TXC max burst register).
751 	 */
752 	(void) nxge_init_fzc_txdma_channel_drr(nxgep, channel, tx_ring_p);
753 
754 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
755 	    "<== nxge_init_fzc_txdma_channel"));
756 	return (status);
757 }
758 
759 
760 nxge_status_t
761 nxge_init_fzc_rx_common(p_nxge_t nxgep)
762 {
763 	npi_handle_t	handle;
764 	npi_status_t	rs = NPI_SUCCESS;
765 	nxge_status_t	status = NXGE_OK;
766 	nxge_rdc_grp_t	*rdc_grp_p;
767 	clock_t		lbolt;
768 	int		table;
769 
770 	nxge_hw_pt_cfg_t *hardware;
771 
772 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rx_common"));
773 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
774 	if (!handle.regp) {
775 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
776 		    "==> nxge_init_fzc_rx_common null ptr"));
777 		return (NXGE_ERROR);
778 	}
779 
780 	/*
781 	 * Configure the RXDMA clock divider.
782 	 * This sets the granularity counter, which is based on the
783 	 * hardware system clock (300 MHz, i.e. roughly 3.3 ns per
784 	 * tick).
785 	 * Setting the clock divider counter to 1000 gives a
786 	 * granularity of about 3 microseconds.
787 	 * For example, a 3 microsecond timeout is programmed as a
788 	 * count of 1.
789 	 */
790 	rs = npi_rxdma_cfg_clock_div_set(handle, RXDMA_CK_DIV_DEFAULT);
791 	if (rs != NPI_SUCCESS)
792 		return (NXGE_ERROR | rs);
793 
794 
795 	/*
796 	 * Enable WRED and program an initial value.
797 	 * Use time to set the initial random number.
798 	 */
799 	(void) drv_getparm(LBOLT, &lbolt);
800 	rs = npi_rxdma_cfg_red_rand_init(handle, (uint16_t)lbolt);
801 	if (rs != NPI_SUCCESS)
802 		return (NXGE_ERROR | rs);
803 
804 	hardware = &nxgep->pt_config.hw_config;
805 	for (table = 0; table < NXGE_MAX_RDC_GRPS; table++) {
806 		/* Does this table belong to <nxgep>? */
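		/*
		 * (grpids[] entries are biased by 256 when bound in
		 * nxge_fzc_rdc_tbl_bind(), presumably so that function 0
		 * still yields a nonzero "owned" value.)
		 */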
807 		if (hardware->grpids[table] == (nxgep->function_num + 256)) {
808 			rdc_grp_p = &nxgep->pt_config.rdc_grps[table];
809 			status = nxge_init_fzc_rdc_tbl(nxgep, rdc_grp_p, table);
810 		}
811 	}
812 
813 	/* Ethernet Timeout Counter (?) */
814 
815 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
816 	    "<== nxge_init_fzc_rx_common:status 0x%08x", status));
817 
818 	return (status);
819 }
820 
821 nxge_status_t
822 nxge_init_fzc_rdc_tbl(p_nxge_t nxge, nxge_rdc_grp_t *group, int rdc_tbl)
823 {
824 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
825 	nx_rdc_tbl_t	*table;
826 	npi_handle_t	handle;
827 
828 	npi_status_t	rs = NPI_SUCCESS;
829 	nxge_status_t	status = NXGE_OK;
830 
831 	NXGE_DEBUG_MSG((nxge, DMA_CTL, "==> nxge_init_fzc_rdc_tbl(%d)", rdc_tbl));
832 
833 	/* This RDC table must have been previously bound to <nxge>. */
834 	MUTEX_ENTER(&nhd->lock);
835 	table = &nhd->rdc_tbl[rdc_tbl];
836 	if (table->nxge != (uintptr_t)nxge) {
837 		MUTEX_EXIT(&nhd->lock);
838 		NXGE_ERROR_MSG((nxge, DMA_CTL,
839 		    "nxge_init_fzc_rdc_tbl(%d): not owner", rdc_tbl));
840 		return (NXGE_ERROR);
841 	} else {
842 		table->map = group->map;
843 	}
844 	MUTEX_EXIT(&nhd->lock);
845 
846 	handle = NXGE_DEV_NPI_HANDLE(nxge);
847 
848 	rs = npi_rxdma_rdc_table_config(handle, rdc_tbl,
849 	    group->map, group->max_rdcs);
850 
851 	if (rs != NPI_SUCCESS) {
852 		status = NXGE_ERROR | rs;
853 	}
854 
855 	NXGE_DEBUG_MSG((nxge, DMA_CTL, "<== nxge_init_fzc_rdc_tbl(%d)", rdc_tbl));
856 	return (status);
857 }
858 
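/*
 * rdc_tbl_bind
 *
 *	Reserve an RDC table for <nxge>.  If <rdc_tbl> names a valid table,
 *	only that table is tried; otherwise the first unbound table is taken.
 *	Returns the bound table index, or -EBUSY if no table could be bound.
 */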
859 static
860 int
861 rdc_tbl_bind(p_nxge_t nxge, int rdc_tbl)
862 {
863 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
864 	nx_rdc_tbl_t *table;
865 	int i;
866 
867 	NXGE_DEBUG_MSG((nxge, DMA_CTL, "==> nxge_fzc_rdc_tbl_bind"));
868 
869 	MUTEX_ENTER(&nhd->lock);
870 	/* is the caller asking for a particular table? */
871 	if (rdc_tbl >= 0 && rdc_tbl < NXGE_MAX_RDC_GROUPS) {
872 		table = &nhd->rdc_tbl[rdc_tbl];
873 		if (table->nxge == 0) {
874 			table->nxge = (uintptr_t)nxge; /* It is now bound. */
875 			NXGE_DEBUG_MSG((nxge, DMA_CTL,
876 			    "<== nxge_fzc_rdc_tbl_bind(%d)", rdc_tbl));
877 			MUTEX_EXIT(&nhd->lock);
878 			return (rdc_tbl);
879 		}
880 	} else {	/* The caller will take any old RDC table. */
881 		for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
882 			nx_rdc_tbl_t *table = &nhd->rdc_tbl[i];
883 			if (table->nxge == 0) {
884 				table->nxge = (uintptr_t)nxge;
885 				/* It is now bound. */
886 				MUTEX_EXIT(&nhd->lock);
887 				NXGE_DEBUG_MSG((nxge, DMA_CTL,
888 				    "<== nxge_fzc_rdc_tbl_bind: %d", i));
889 				return (i);
890 			}
891 		}
892 	}
893 	MUTEX_EXIT(&nhd->lock);
894 
895 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_fzc_rdc_tbl_bind"));
896 
897 	return (-EBUSY);	/* RDC tables are bound. */
898 }
899 
900 int
901 nxge_fzc_rdc_tbl_bind(
902 	nxge_t *nxge,
903 	int grp_index,
904 	int acceptNoSubstitutes)
905 {
906 	nxge_hw_pt_cfg_t *hardware;
907 	int index;
908 
909 	hardware = &nxge->pt_config.hw_config;
910 
911 	if ((index = rdc_tbl_bind(nxge, grp_index)) < 0) {
912 		if (acceptNoSubstitutes)
913 			return (index);
914 		index = rdc_tbl_bind(nxge, grp_index);
915 		if (index < 0) {
916 			NXGE_ERROR_MSG((nxge, OBP_CTL,
917 			    "nxge_fzc_rdc_tbl_bind: "
918 			    "there are no free RDC tables!"));
919 			return (index);
920 		}
921 	}
922 
923 	hardware->grpids[index] = nxge->function_num + 256;
924 
925 	return (index);
926 }
927 
928 int
929 nxge_fzc_rdc_tbl_unbind(p_nxge_t nxge, int rdc_tbl)
930 {
931 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
932 	nx_rdc_tbl_t *table;
933 
934 	if (nhd == NULL)
935 		return (0);
936 
937 	NXGE_DEBUG_MSG((nxge, DMA_CTL, "==> nxge_fzc_rdc_tbl_unbind(%d)",
938 	    rdc_tbl));
939 
940 	MUTEX_ENTER(&nhd->lock);
941 	table = &nhd->rdc_tbl[rdc_tbl];
942 	if (table->nxge != (uintptr_t)nxge) {
943 		NXGE_ERROR_MSG((nxge, DMA_CTL,
944 		    "nxge_fzc_rdc_tbl_unbind(%d): func%d not owner",
945 		    nxge->function_num, rdc_tbl));
946 		MUTEX_EXIT(&nhd->lock);
947 		return (EINVAL);
948 	} else {
949 		bzero(table, sizeof (*table));
950 	}
951 	MUTEX_EXIT(&nhd->lock);
952 
953 	NXGE_DEBUG_MSG((nxge, DMA_CTL, "<== nxge_fzc_rdc_tbl_unbind(%d)",
954 	    rdc_tbl));
955 
956 	return (0);
957 }
958 
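/*
 * nxge_init_fzc_rxdma_port
 *
 *	Per-port receive FZC initialization: the port scheduler DRR weight
 *	(1G ports only), the port's default RDC, and the MAC host info
 *	(MAC-address-slot to RDC-table) entries.
 */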
959 nxge_status_t
960 nxge_init_fzc_rxdma_port(p_nxge_t nxgep)
961 {
962 	npi_handle_t		handle;
963 	p_nxge_dma_pt_cfg_t	p_all_cfgp;
964 	p_nxge_hw_pt_cfg_t	p_cfgp;
965 	hostinfo_t		hostinfo;
966 	int			i;
967 	npi_status_t		rs = NPI_SUCCESS;
968 	p_nxge_class_pt_cfg_t	p_class_cfgp;
969 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rxdma_port"));
970 
971 	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
972 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
973 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
974 	/*
975 	 * Initialize the port scheduler DRR weight.
976 	 * npi_rxdma_cfg_port_ddr_weight();
977 	 */
978 
979 	if ((nxgep->mac.portmode == PORT_1G_COPPER) ||
980 	    (nxgep->mac.portmode == PORT_1G_FIBER) ||
981 	    (nxgep->mac.portmode == PORT_1G_TN1010) ||
982 	    (nxgep->mac.portmode == PORT_1G_SERDES)) {
983 		rs = npi_rxdma_cfg_port_ddr_weight(handle,
984 		    nxgep->function_num, NXGE_RX_DRR_WT_1G);
985 		if (rs != NPI_SUCCESS) {
986 			return (NXGE_ERROR | rs);
987 		}
988 	}
989 
990 	/* Program the default RDC of a port */
991 	rs = npi_rxdma_cfg_default_port_rdc(handle, nxgep->function_num,
992 	    p_cfgp->def_rdc);
993 	if (rs != NPI_SUCCESS) {
994 		return (NXGE_ERROR | rs);
995 	}
996 
997 	/*
998 	 * Configure the MAC host info table with RDC tables
999 	 */
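	/*
	 * Each MAC address slot defaults to the port's MAC RDC group; if a
	 * per-slot override was configured (mac_host_info[i].flag), its RDC
	 * table number and MAC preference are used instead.
	 */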
1000 	hostinfo.value = 0;
1001 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1002 	for (i = 0; i < p_cfgp->max_macs; i++) {
1003 		hostinfo.bits.w0.rdc_tbl_num = p_cfgp->def_mac_rxdma_grpid;
1004 		hostinfo.bits.w0.mac_pref = p_cfgp->mac_pref;
1005 		if (p_class_cfgp->mac_host_info[i].flag) {
1006 			hostinfo.bits.w0.rdc_tbl_num =
1007 			    p_class_cfgp->mac_host_info[i].rdctbl;
1008 			hostinfo.bits.w0.mac_pref =
1009 			    p_class_cfgp->mac_host_info[i].mpr_npr;
1010 		}
1011 
1012 		rs = npi_mac_hostinfo_entry(handle, OP_SET,
1013 		    nxgep->function_num, i, &hostinfo);
1014 		if (rs != NPI_SUCCESS)
1015 			return (NXGE_ERROR | rs);
1016 	}
1017 
1018 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1019 	    "<== nxge_init_fzc_rxdma_port rs 0x%08x", rs));
1020 
1021 	return (NXGE_OK);
1022 
1023 }
1024 
1025 nxge_status_t
1026 nxge_fzc_dmc_def_port_rdc(p_nxge_t nxgep, uint8_t port, uint16_t rdc)
1027 {
1028 	npi_status_t rs = NPI_SUCCESS;
1029 	rs = npi_rxdma_cfg_default_port_rdc(nxgep->npi_reg_handle,
1030 	    port, rdc);
1031 	if (rs & NPI_FAILURE)
1032 		return (NXGE_ERROR | rs);
1033 	return (NXGE_OK);
1034 }
1035 
1036 /*
1037  * nxge_init_fzc_tdc_pages
1038  *
1039  *	Configure a TDC's logical pages.
1040  *
1041  *	This function is executed by the service domain, on behalf of
1042  *	a guest domain, to whom this TDC has been loaned.
1043  *
1044  * Arguments:
1045  *	nxgep
1046  *	channel		The channel to initialize.
1047  *	page0		Logical page 0 definition.
1048  *	page1		Logical page 1 definition.
1049  *
1050  * Notes:
1051  *	I think that this function can be called from any
1052  *	domain, but I need to check.
1053  *
1054  * NPI/NXGE function calls:
1055  *	npi_txdma_log_page_set()
1056  *	npi_txdma_log_page_handle_set()
1057  *
1058  * Context:
1059  *	Any domain
1060  */
1061 nxge_status_t
1062 nxge_init_fzc_tdc_pages(
1063 	p_nxge_t nxgep,
1064 	uint16_t channel,
1065 	dma_log_page_t *page0,
1066 	dma_log_page_t *page1)
1067 {
1068 	npi_handle_t handle;
1069 	npi_status_t rs;
1070 
1071 	log_page_hdl_t page_handle;
1072 
1073 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1074 	    "==> nxge_init_fzc_tdc_pages"));
1075 
1076 #ifndef	NIU_HV_WORKAROUND
1077 	if (nxgep->niu_type == N2_NIU) {
1078 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1079 		    "<== nxge_init_fzc_tdc_pages: "
1080 		    "N2_NIU: no need to set txdma logical pages"));
1081 		return (NXGE_OK);
1082 	}
1083 #else
1084 	if (nxgep->niu_type == N2_NIU) {
1085 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1086 		    "<== nxge_init_fzc_tdc_pages: "
1087 		    "N2_NIU: NEED to set txdma logical pages"));
1088 	}
1089 #endif
1090 
1091 	/*
1092 	 * Initialize logical page 1.
1093 	 */
1094 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1095 	if ((rs = npi_txdma_log_page_set(handle, channel, page0))
1096 	    != NPI_SUCCESS)
1097 		return (NXGE_ERROR | rs);
1098 
1099 	/*
1100 	 * Initialize logical page 2.
1101 	 */
1102 	if ((rs = npi_txdma_log_page_set(handle, channel, page1))
1103 	    != NPI_SUCCESS)
1104 		return (NXGE_ERROR | rs);
1105 
1106 	/*
1107 	 * Initialize the page handle.
1108 	 * (In the current driver, this is always set to 0.)
1109 	 */
1110 	page_handle.value = 0;
1111 	rs = npi_txdma_log_page_handle_set(handle, channel, &page_handle);
1112 	if (rs == NPI_SUCCESS) {
1113 		return (NXGE_OK);
1114 	} else {
1115 		return (NXGE_ERROR | rs);
1116 	}
1117 }
1118 
1119 nxge_status_t
1120 nxge_init_fzc_txdma_channel_pages(p_nxge_t nxgep, uint16_t channel,
1121     p_tx_ring_t tx_ring_p)
1122 {
1123 	npi_handle_t		handle;
1124 	dma_log_page_t		cfg;
1125 	npi_status_t		rs = NPI_SUCCESS;
1126 
1127 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1128 	    "==> nxge_init_fzc_txdma_channel_pages"));
1129 
1130 #ifndef	NIU_HV_WORKAROUND
1131 	if (nxgep->niu_type == N2_NIU) {
1132 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1133 		    "<== nxge_init_fzc_txdma_channel_pages: "
1134 		    "N2_NIU: no need to set txdma logical pages"));
1135 		return (NXGE_OK);
1136 	}
1137 #else
1138 	if (nxgep->niu_type == N2_NIU) {
1139 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1140 		    "<== nxge_init_fzc_txdma_channel_pages: "
1141 		    "N2_NIU: NEED to set txdma logical pages"));
1142 	}
1143 #endif
1144 
1145 	/*
1146 	 * Initialize logical page 1.
1147 	 */
1148 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1149 	cfg.func_num = nxgep->function_num;
1150 	cfg.page_num = 0;
1151 	cfg.valid = tx_ring_p->page_valid.bits.ldw.page0;
1152 	cfg.value = tx_ring_p->page_value_1.value;
1153 	cfg.mask = tx_ring_p->page_mask_1.value;
1154 	cfg.reloc = tx_ring_p->page_reloc_1.value;
1155 
1156 	rs = npi_txdma_log_page_set(handle, channel,
1157 	    (p_dma_log_page_t)&cfg);
1158 	if (rs != NPI_SUCCESS) {
1159 		return (NXGE_ERROR | rs);
1160 	}
1161 
1162 	/*
1163 	 * Initialize logical page 2.
1164 	 */
1165 	cfg.page_num = 1;
1166 	cfg.valid = tx_ring_p->page_valid.bits.ldw.page1;
1167 	cfg.value = tx_ring_p->page_value_2.value;
1168 	cfg.mask = tx_ring_p->page_mask_2.value;
1169 	cfg.reloc = tx_ring_p->page_reloc_2.value;
1170 
1171 	rs = npi_txdma_log_page_set(handle, channel, &cfg);
1172 	if (rs != NPI_SUCCESS) {
1173 		return (NXGE_ERROR | rs);
1174 	}
1175 
1176 	/* Initialize the page handle */
1177 	rs = npi_txdma_log_page_handle_set(handle, channel,
1178 	    &tx_ring_p->page_hdl);
1179 
1180 	if (rs == NPI_SUCCESS) {
1181 		return (NXGE_OK);
1182 	} else {
1183 		return (NXGE_ERROR | rs);
1184 	}
1185 }
1186 
1187 
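/*
 * nxge_init_fzc_txdma_channel_drr
 *
 *	Program the channel's TXC_DMA_MAX burst value (the DRR weight)
 *	from the transmit ring's max_burst setting.
 */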
1188 nxge_status_t
1189 nxge_init_fzc_txdma_channel_drr(p_nxge_t nxgep, uint16_t channel,
1190     p_tx_ring_t tx_ring_p)
1191 {
1192 	npi_status_t	rs = NPI_SUCCESS;
1193 	npi_handle_t	handle;
1194 
1195 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1196 	rs = npi_txc_dma_max_burst_set(handle, channel,
1197 	    tx_ring_p->max_burst.value);
1198 	if (rs == NPI_SUCCESS) {
1199 		return (NXGE_OK);
1200 	} else {
1201 		return (NXGE_ERROR | rs);
1202 	}
1203 }
1204 
1205 nxge_status_t
1206 nxge_fzc_sys_err_mask_set(p_nxge_t nxgep, uint64_t mask)
1207 {
1208 	npi_status_t	rs = NPI_SUCCESS;
1209 	npi_handle_t	handle;
1210 
1211 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1212 	rs = npi_fzc_sys_err_mask_set(handle, mask);
1213 	if (rs == NPI_SUCCESS) {
1214 		return (NXGE_OK);
1215 	} else {
1216 		return (NXGE_ERROR | rs);
1217 	}
1218 }
1219 
1220 /*
1221  * nxge_init_hv_fzc_txdma_channel_pages
1222  *
1223  *	Configure a TDC's logical pages.
1224  *
1225  * Arguments:
1226  *	nxgep
1227  *	channel		The channel to initialize.
1228  *	tx_ring_p	The transmit ring.
1229  *
1230  * Notes:
1231  *	I think that this function can be called from any
1232  *	domain, but I need to check.
1233  *
1234  * NPI/NXGE function calls:
1235  *	hv_niu_tx_logical_page_conf()
1236  *	hv_niu_tx_logical_page_info()
1237  *
1238  * Context:
1239  *	Any domain
1240  */
1241 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
1242 nxge_status_t
1243 nxge_init_hv_fzc_txdma_channel_pages(p_nxge_t nxgep, uint16_t channel,
1244     p_tx_ring_t tx_ring_p)
1245 {
1246 	int			err;
1247 	uint64_t		hverr;
1248 #ifdef	DEBUG
1249 	uint64_t		ra, size;
1250 #endif
1251 
1252 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1253 	    "==> nxge_init_hv_fzc_txdma_channel_pages"));
1254 
1255 	if (tx_ring_p->hv_set) {
1256 		return (NXGE_OK);
1257 	}
1258 
1259 	/*
1260 	 * Initialize logical page 1 for data buffers.
1261 	 */
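	/*
	 * The hypervisor call programs this channel's logical page with the
	 * ring's I/O base address and size, so that the NIU can validate and
	 * translate the DMA addresses used by this domain.
	 */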
1262 	hverr = nxge_init_hv_fzc_lp_op(nxgep, (uint64_t)channel,
1263 	    (uint64_t)0, N2NIU_TX_LP_CONF,
1264 	    tx_ring_p->hv_tx_buf_base_ioaddr_pp,
1265 	    tx_ring_p->hv_tx_buf_ioaddr_size);
1266 
1267 	err = (nxge_status_t)nxge_herr2kerr(hverr);
1268 	if (err != 0) {
1269 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1270 		    "<== nxge_init_hv_fzc_txdma_channel_pages: channel %d "
1271 		    "error status 0x%x "
1272 		    "(page 0 data buf) hverr 0x%llx "
1273 		    "ioaddr_pp $%p "
1274 		    "size 0x%llx ",
1275 		    channel,
1276 		    err,
1277 		    hverr,
1278 		    tx_ring_p->hv_tx_buf_base_ioaddr_pp,
1279 		    tx_ring_p->hv_tx_buf_ioaddr_size));
1280 		return (NXGE_ERROR | err);
1281 	}
1282 
1283 #ifdef	DEBUG
1284 	ra = size = 0;
1285 	hverr = nxge_init_hv_fzc_lp_op(nxgep, (uint64_t)channel,
1286 	    (uint64_t)0, N2NIU_TX_LP_INFO,
1287 	    (uint64_t)&ra, (uint64_t)&size);
1288 
1289 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1290 	    "==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
1291 	    "ok status 0x%x "
1292 	    "(page 0 data buf) hverr 0x%llx "
1293 	    "set ioaddr_pp $%p "
1294 	    "set size 0x%llx "
1295 	    "get ra ioaddr_pp $%p "
1296 	    "get size 0x%llx ",
1297 	    channel,
1298 	    err,
1299 	    hverr,
1300 	    tx_ring_p->hv_tx_buf_base_ioaddr_pp,
1301 	    tx_ring_p->hv_tx_buf_ioaddr_size,
1302 	    ra,
1303 	    size));
1304 #endif
1305 
1306 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1307 	    "==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
1308 	    "(page 0 data buf) hverr 0x%llx "
1309 	    "ioaddr_pp $%p "
1310 	    "size 0x%llx ",
1311 	    channel,
1312 	    hverr,
1313 	    tx_ring_p->hv_tx_buf_base_ioaddr_pp,
1314 	    tx_ring_p->hv_tx_buf_ioaddr_size));
1315 
1316 	/*
1317 	 * Initialize logical page 2 for control buffers.
1318 	 */
1319 	hverr = nxge_init_hv_fzc_lp_op(nxgep, (uint64_t)channel,
1320 	    (uint64_t)1, N2NIU_TX_LP_CONF,
1321 	    tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
1322 	    tx_ring_p->hv_tx_cntl_ioaddr_size);
1323 
1324 	err = (nxge_status_t)nxge_herr2kerr(hverr);
1325 
1326 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1327 	    "==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
1328 	    "ok status 0x%x "
1329 	    "(page 1 cntl buf) hverr 0x%llx "
1330 	    "ioaddr_pp $%p "
1331 	    "size 0x%llx ",
1332 	    channel,
1333 	    err,
1334 	    hverr,
1335 	    tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
1336 	    tx_ring_p->hv_tx_cntl_ioaddr_size));
1337 
1338 	if (err != 0) {
1339 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1340 		    "<== nxge_init_hv_fzc_txdma_channel_pages: channel %d "
1341 		    "error status 0x%x "
1342 		    "(page 1 cntl buf) hverr 0x%llx "
1343 		    "ioaddr_pp $%p "
1344 		    "size 0x%llx ",
1345 		    channel,
1346 		    err,
1347 		    hverr,
1348 		    tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
1349 		    tx_ring_p->hv_tx_cntl_ioaddr_size));
1350 		return (NXGE_ERROR | err);
1351 	}
1352 
1353 #ifdef	DEBUG
1354 	ra = size = 0;
1355 	hverr = nxge_init_hv_fzc_lp_op(nxgep, (uint64_t)channel,
1356 	    (uint64_t)1, N2NIU_TX_LP_INFO,
1357 	    (uint64_t)&ra, (uint64_t)&size);
1358 
1359 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1360 	    "==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
1361 	    "(page 1 cntl buf) hverr 0x%llx "
1362 	    "set ioaddr_pp $%p "
1363 	    "set size 0x%llx "
1364 	    "get ra ioaddr_pp $%p "
1365 	    "get size 0x%llx ",
1366 	    channel,
1367 	    hverr,
1368 	    tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
1369 	    tx_ring_p->hv_tx_cntl_ioaddr_size,
1370 	    ra,
1371 	    size));
1372 #endif
1373 
1374 	tx_ring_p->hv_set = B_TRUE;
1375 
1376 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1377 	    "<== nxge_init_hv_fzc_txdma_channel_pages"));
1378 
1379 	return (NXGE_OK);
1380 }
1381 
1382 /*ARGSUSED*/
1383 nxge_status_t
1384 nxge_init_hv_fzc_rxdma_channel_pages(p_nxge_t nxgep,
1385     uint16_t channel, p_rx_rbr_ring_t rbrp)
1386 {
1387 	int			err;
1388 	uint64_t		hverr;
1389 #ifdef	DEBUG
1390 	uint64_t		ra, size;
1391 #endif
1392 
1393 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1394 	    "==> nxge_init_hv_fzc_rxdma_channel_pages"));
1395 
1396 	if (rbrp->hv_set) {
1397 		return (NXGE_OK);
1398 	}
1399 
1400 	/* Initialize data buffers for page 0 */
1401 	hverr = nxge_init_hv_fzc_lp_op(nxgep, (uint64_t)channel,
1402 	    (uint64_t)0, N2NIU_RX_LP_CONF,
1403 	    rbrp->hv_rx_buf_base_ioaddr_pp,
1404 	    rbrp->hv_rx_buf_ioaddr_size);
1405 
1406 	err = (nxge_status_t)nxge_herr2kerr(hverr);
1407 	if (err != 0) {
1408 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1409 		    "<== nxge_init_hv_fzc_rxdma_channel_pages: channel %d "
1410 		    "error status 0x%x "
1411 		    "(page 0 data buf) hverr 0x%llx "
1412 		    "ioaddr_pp $%p "
1413 		    "size 0x%llx ",
1414 		    channel,
1415 		    err,
1416 		    hverr,
1417 		    rbrp->hv_rx_buf_base_ioaddr_pp,
1418 		    rbrp->hv_rx_buf_ioaddr_size));
1419 
1420 		return (NXGE_ERROR | err);
1421 	}
1422 
1423 #ifdef	DEBUG
1424 	ra = size = 0;
1425 	hverr = nxge_init_hv_fzc_lp_op(nxgep, (uint64_t)channel,
1426 	    (uint64_t)0, N2NIU_RX_LP_INFO,
1427 	    (uint64_t)&ra, (uint64_t)&size);
1428 
1429 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1430 	    "==> nxge_init_hv_fzc_rxdma_channel_pages: channel %d "
1431 	    "ok status 0x%x "
1432 	    "(page 0 data buf) hverr 0x%llx "
1433 	    "set databuf ioaddr_pp $%p "
1434 	    "set databuf size 0x%llx "
1435 	    "get databuf ra ioaddr_pp %p "
1436 	    "get databuf size 0x%llx",
1437 	    channel,
1438 	    err,
1439 	    hverr,
1440 	    rbrp->hv_rx_buf_base_ioaddr_pp,
1441 	    rbrp->hv_rx_buf_ioaddr_size,
1442 	    ra,
1443 	    size));
1444 #endif
1445 
1446 	/* Initialize control buffers for logical page 1.  */
1447 	hverr = nxge_init_hv_fzc_lp_op(nxgep, (uint64_t)channel,
1448 	    (uint64_t)1, N2NIU_RX_LP_CONF,
1449 	    rbrp->hv_rx_cntl_base_ioaddr_pp,
1450 	    rbrp->hv_rx_cntl_ioaddr_size);
1451 
1452 	err = (nxge_status_t)nxge_herr2kerr(hverr);
1453 	if (err != 0) {
1454 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1455 		    "<== nxge_init_hv_fzc_rxdma_channel_pages: channel %d "
1456 		    "error status 0x%x "
1457 		    "(page 1 cntl buf) hverr 0x%llx "
1458 		    "ioaddr_pp $%p "
1459 		    "size 0x%llx ",
1460 		    channel,
1461 		    err,
1462 		    hverr,
1463 		    rbrp->hv_rx_cntl_base_ioaddr_pp,
1464 		    rbrp->hv_rx_cntl_ioaddr_size));
1465 
1466 		return (NXGE_ERROR | err);
1467 	}
1468 
1469 #ifdef	DEBUG
1470 	ra = size = 0;
1471 	hverr = nxge_init_hv_fzc_lp_op(nxgep, (uint64_t)channel,
1472 	    (uint64_t)1, N2NIU_RX_LP_INFO,
1473 	    (uint64_t)&ra, (uint64_t)&size);
1474 
1475 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1476 	    "==> nxge_init_hv_fzc_rxdma_channel_pages: channel %d "
1477 	    "ok status 0x%x "
1478 	    "(page 1 cntl buf) hverr 0x%llx "
1479 	    "set cntl ioaddr_pp $%p "
1480 	    "set cntl size 0x%llx "
1481 	    "get cntl ioaddr_pp $%p "
1482 	    "get cntl size 0x%llx ",
1483 	    channel,
1484 	    err,
1485 	    hverr,
1486 	    rbrp->hv_rx_cntl_base_ioaddr_pp,
1487 	    rbrp->hv_rx_cntl_ioaddr_size,
1488 	    ra,
1489 	    size));
1490 #endif
1491 
1492 	rbrp->hv_set = B_FALSE;
1493 
1494 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1495 	    "<== nxge_init_hv_fzc_rxdma_channel_pages"));
1496 
1497 	return (NXGE_OK);
1498 }
1499 
1500 /*
1501  * Map hypervisor error code to errno. Only
1502  * H_ENORADDR, H_EBADALIGN and H_EINVAL are meaningful
1503  * for niu driver. Any other error codes are mapped to EINVAL.
1504  */
1505 static int
1506 nxge_herr2kerr(uint64_t hv_errcode)
1507 {
1508 	int	s_errcode;
1509 
1510 	switch (hv_errcode) {
1511 	case H_ENORADDR:
1512 	case H_EBADALIGN:
1513 		s_errcode = EFAULT;
1514 		break;
1515 	case H_EOK:
1516 		s_errcode = 0;
1517 		break;
1518 	default:
1519 		s_errcode = EINVAL;
1520 		break;
1521 	}
1522 	return (s_errcode);
1523 }
1524 
1525 uint64_t
1526 nxge_init_hv_fzc_lp_op(p_nxge_t nxgep, uint64_t channel,
1527     uint64_t page_no, uint64_t op_type,
1528     uint64_t ioaddr_pp, uint64_t ioaddr_size)
1529 {
1530 	uint64_t		hverr;
1531 	uint64_t		major;
1532 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxgep->nxge_hw_p->hio;
1533 	nxhv_dc_fp_t		*io_fp;
1534 
1535 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1536 	    "==> nxge_init_hv_fzc_lp_op"));
1537 
1538 	major = nxgep->niu_hsvc.hsvc_major;
1539 	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
1540 	    "==> nxge_init_hv_fzc_lp_op (major %d): channel %d op_type 0x%x "
1541 	    "page_no %d ioaddr_pp $%p ioaddr_size 0x%llx",
1542 	    major, channel, op_type, page_no, ioaddr_pp, ioaddr_size));
1543 
1544 	/* Dispatch on the negotiated HV API version and the requested operation. */
1545 	switch (major) {
1546 	case NIU_MAJOR_VER: /* 1 */
1547 		switch (op_type) {
1548 		case N2NIU_TX_LP_CONF:
1549 			io_fp = &nhd->hio.tx;
1550 			hverr = (*io_fp->lp_conf)((uint64_t)channel,
1551 			    (uint64_t)page_no,
1552 			    (uint64_t)ioaddr_pp,
1553 			    (uint64_t)ioaddr_size);
1554 			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1555 			    "==> nxge_init_hv_fzc_lp_op(tx_conf): major %d "
1556 			    "op 0x%x hverr 0x%x", major, op_type, hverr));
1557 			break;
1558 
1559 		case N2NIU_TX_LP_INFO:
1560 			io_fp = &nhd->hio.tx;
1561 			hverr = (*io_fp->lp_info)((uint64_t)channel,
1562 			    (uint64_t)page_no,
1563 			    (uint64_t *)ioaddr_pp,
1564 			    (uint64_t *)ioaddr_size);
1565 			break;
1566 
1567 		case N2NIU_RX_LP_CONF:
1568 			io_fp = &nhd->hio.rx;
1569 			hverr = (*io_fp->lp_conf)((uint64_t)channel,
1570 			    (uint64_t)page_no,
1571 			    (uint64_t)ioaddr_pp,
1572 			    (uint64_t)ioaddr_size);
1573 			break;
1574 
1575 		case N2NIU_RX_LP_INFO:
1576 			io_fp = &nhd->hio.rx;
1577 			hverr = (*io_fp->lp_info)((uint64_t)channel,
1578 			    (uint64_t)page_no,
1579 			    (uint64_t *)ioaddr_pp,
1580 			    (uint64_t *)ioaddr_size);
1581 			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1582 			    "==> nxge_init_hv_fzc_lp_op(rx_conf): major %d "
1583 			    "op 0x%x hverr 0x%x", major, op_type, hverr));
1584 			break;
1585 
1586 		default:
1587 			hverr = EINVAL;
1588 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1589 			    "==> nxge_init_hv_fzc_lp_op(rx_conf): major %d "
1590 			    "invalid op 0x%x hverr 0x%x", major,
1591 			    op_type, hverr));
1592 			break;
1593 		}
1594 
1595 		break;
1596 
1597 	case NIU_MAJOR_VER_2: /* 2 */
1598 		switch (op_type) {
1599 		case N2NIU_TX_LP_CONF:
1600 			io_fp = &nhd->hio.tx;
1601 			hverr = (*io_fp->lp_cfgh_conf)(nxgep->niu_cfg_hdl,
1602 			    (uint64_t)channel,
1603 			    (uint64_t)page_no, ioaddr_pp, ioaddr_size);
1604 
1605 			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1606 			    "==> nxge_init_hv_fzc_lp_op(tx_conf): major %d "
1607 			    "op 0x%x hverr 0x%x", major, op_type, hverr));
1608 			break;
1609 
1610 		case N2NIU_TX_LP_INFO:
1611 			io_fp = &nhd->hio.tx;
1612 			hverr = (*io_fp->lp_cfgh_info)(nxgep->niu_cfg_hdl,
1613 			    (uint64_t)channel,
1614 			    (uint64_t)page_no,
1615 			    (uint64_t *)ioaddr_pp,
1616 			    (uint64_t *)ioaddr_size);
1617 			break;
1618 
1619 		case N2NIU_RX_LP_CONF:
1620 			io_fp = &nhd->hio.rx;
1621 			hverr = (*io_fp->lp_cfgh_conf)(nxgep->niu_cfg_hdl,
1622 			    (uint64_t)channel,
1623 			    (uint64_t)page_no,
1624 			    (uint64_t)ioaddr_pp,
1625 			    (uint64_t)ioaddr_size);
1626 			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1627 			    "==> nxge_init_hv_fzc_lp_op(rx_conf): major %d "
1628 			    "hverr 0x%x", major, hverr));
1629 			break;
1630 
1631 		case N2NIU_RX_LP_INFO:
1632 			io_fp = &nhd->hio.rx;
1633 			hverr = (*io_fp->lp_cfgh_info)(nxgep->niu_cfg_hdl,
1634 			    (uint64_t)channel,
1635 			    (uint64_t)page_no,
1636 			    (uint64_t *)ioaddr_pp,
1637 			    (uint64_t *)ioaddr_size);
1638 			break;
1639 
1640 		default:
1641 			hverr = EINVAL;
1642 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1643 			    "==> nxge_init_hv_fzc_lp_op(rx_conf): major %d "
1644 			    "invalid op 0x%x hverr 0x%x", major,
1645 			    op_type, hverr));
1646 			break;
1647 		}
1648 
1649 		break;
1650 
1651 	default:
1652 		hverr = EINVAL;
1653 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1654 		    "==> nxge_init_hv_fzc_lp_op(rx_conf): invalid major %d "
1655 		    "op 0x%x hverr 0x%x", major, op_type, hverr));
1656 		break;
1657 	}
1658 
1659 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1660 	    "<== nxge_init_hv_fzc_lp_op: 0x%x", hverr));
1661 
1662 	return (hverr);
1663 }
1664 
1665 #endif	/* sun4v and NIU_LP_WORKAROUND */
1666