xref: /titanic_50/usr/src/uts/common/io/nxge/nxge_fzc.c (revision ecd343b647e2ba2d0bf8f09646e721f05eb752aa)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include	<nxge_impl.h>
29 #include	<npi_mac.h>
30 #include	<npi_rxdma.h>
31 
32 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
33 static int	nxge_herr2kerr(uint64_t);
34 #endif
35 
36 /*
37  * The following interfaces are controlled by the
38  * function control registers. Some global registers
39  * are to be initialized by only one of the 2/4 functions.
40  * Use the test and set register.
41  */
42 /*ARGSUSED*/
43 nxge_status_t
44 nxge_test_and_set(p_nxge_t nxgep, uint8_t tas)
45 {
46 	npi_handle_t		handle;
47 	npi_status_t		rs = NPI_SUCCESS;
48 
49 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
50 	if ((rs = npi_dev_func_sr_sr_get_set_clear(handle, tas))
51 			!= NPI_SUCCESS) {
52 		return (NXGE_ERROR | rs);
53 	}
54 
55 	return (NXGE_OK);
56 }
57 
58 nxge_status_t
59 nxge_set_fzc_multi_part_ctl(p_nxge_t nxgep, boolean_t mpc)
60 {
61 	npi_handle_t		handle;
62 	npi_status_t		rs = NPI_SUCCESS;
63 
64 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_set_fzc_multi_part_ctl"));
65 
66 	/*
67 	 * In multi-partitioning, the partition manager
68  * that owns function zero should set this multi-partition
69 	 * control bit.
70 	 */
71 	if (nxgep->use_partition && nxgep->function_num) {
72 		return (NXGE_ERROR);
73 	}
74 
75 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
76 	if ((rs = npi_fzc_mpc_set(handle, mpc)) != NPI_SUCCESS) {
77 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
78 			"<== nxge_set_fzc_multi_part_ctl"));
79 		return (NXGE_ERROR | rs);
80 	}
81 
82 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_set_fzc_multi_part_ctl"));
83 
84 	return (NXGE_OK);
85 }
86 
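/*
 * Read back the multi-partition control bit through the NPI layer
 * and return it in *mpc_p.
 */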
87 nxge_status_t
88 nxge_get_fzc_multi_part_ctl(p_nxge_t nxgep, boolean_t *mpc_p)
89 {
90 	npi_handle_t		handle;
91 	npi_status_t		rs = NPI_SUCCESS;
92 
93 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_get_fzc_multi_part_ctl"));
94 
95 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
96 	if ((rs = npi_fzc_mpc_get(handle, mpc_p)) != NPI_SUCCESS) {
97 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
98 			"<== nxge_get_fzc_multi_part_ctl"));
99 		return (NXGE_ERROR | rs);
100 	}
101 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_get_fzc_multi_part_ctl"));
102 
103 	return (NXGE_OK);
104 }
105 
106 /*
107  * System interrupt registers that are under function zero
108  * management.
109  */
110 nxge_status_t
111 nxge_fzc_intr_init(p_nxge_t nxgep)
112 {
113 	nxge_status_t	status = NXGE_OK;
114 
115 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_init"));
116 
117 	/* Configure the initial timer resolution */
118 	if ((status = nxge_fzc_intr_tmres_set(nxgep)) != NXGE_OK) {
119 		return (status);
120 	}
121 
122 	switch (nxgep->niu_type) {
123 	case NEPTUNE:
124 	case NEPTUNE_2:
125 		/*
126 		 * Bind the logical devices that each logical device
127 		 * group owns to that group.
128 		 */
129 		if ((status = nxge_fzc_intr_ldg_num_set(nxgep))
130 				!= NXGE_OK) {
131 			break;
132 		}
133 
134 		/* Configure the system interrupt data */
135 		if ((status = nxge_fzc_intr_sid_set(nxgep)) != NXGE_OK) {
136 			break;
137 		}
138 
139 		break;
140 	}
141 
142 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_init"));
143 
144 	return (status);
145 }
146 
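/*
 * Walk all of the logical device groups owned by this instance and
 * bind each logical device to its assigned group number in hardware.
 */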
147 nxge_status_t
148 nxge_fzc_intr_ldg_num_set(p_nxge_t nxgep)
149 {
150 	p_nxge_ldg_t	ldgp;
151 	p_nxge_ldv_t	ldvp;
152 	npi_handle_t	handle;
153 	int		i, j;
154 	npi_status_t	rs = NPI_SUCCESS;
155 
156 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_ldg_num_set"));
157 
158 	if (nxgep->ldgvp == NULL) {
159 		return (NXGE_ERROR);
160 	}
161 
162 	ldgp = nxgep->ldgvp->ldgp;
163 	ldvp = nxgep->ldgvp->ldvp;
164 	if (ldgp == NULL || ldvp == NULL) {
165 		return (NXGE_ERROR);
166 	}
167 
168 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
169 
170 	for (i = 0; i < nxgep->ldgvp->ldg_intrs; i++, ldgp++) {
171 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
172 			"==> nxge_fzc_intr_ldg_num_set "
173 			"(Neptune): # ldv %d "
174 			"in group %d", ldgp->nldvs, ldgp->ldg));
175 
176 		for (j = 0; j < ldgp->nldvs; j++, ldvp++) {
177 			rs = npi_fzc_ldg_num_set(handle, ldvp->ldv,
178 				ldvp->ldg_assigned);
179 			if (rs != NPI_SUCCESS) {
180 				NXGE_DEBUG_MSG((nxgep, INT_CTL,
181 					"<== nxge_fzc_intr_ldg_num_set failed "
182 					" rs 0x%x ldv %d ldg %d",
183 					rs, ldvp->ldv, ldvp->ldg_assigned));
184 				return (NXGE_ERROR | rs);
185 			}
186 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
187 				"<== nxge_fzc_intr_ldg_num_set OK "
188 				" ldv %d ldg %d",
189 				ldvp->ldv, ldvp->ldg_assigned));
190 		}
191 	}
192 
193 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_ldg_num_set"));
194 
195 	return (NXGE_OK);
196 }
197 
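/*
 * Program the logical device group timer resolution that was chosen
 * when the logical device groups were configured.
 */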
198 nxge_status_t
199 nxge_fzc_intr_tmres_set(p_nxge_t nxgep)
200 {
201 	npi_handle_t	handle;
202 	npi_status_t	rs = NPI_SUCCESS;
203 
204 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_tmres_set"));
205 	if (nxgep->ldgvp == NULL) {
206 		return (NXGE_ERROR);
207 	}
208 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
209 	if ((rs = npi_fzc_ldg_timer_res_set(handle, nxgep->ldgvp->tmres))) {
210 		return (NXGE_ERROR | rs);
211 	}
212 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_tmres_set"));
213 
214 	return (NXGE_OK);
215 }
216 
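/*
 * Program the system interrupt data (function, group and vector)
 * for each logical device group.
 */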
217 nxge_status_t
218 nxge_fzc_intr_sid_set(p_nxge_t nxgep)
219 {
220 	npi_handle_t	handle;
221 	p_nxge_ldg_t	ldgp;
222 	fzc_sid_t	sid;
223 	int		i;
224 	npi_status_t	rs = NPI_SUCCESS;
225 
226 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_sid_set"));
227 	if (nxgep->ldgvp == NULL) {
228 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
229 			"<== nxge_fzc_intr_sid_set: no ldg"));
230 		return (NXGE_ERROR);
231 	}
232 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
233 	ldgp = nxgep->ldgvp->ldgp;
234 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
235 		"==> nxge_fzc_intr_sid_set: #int %d", nxgep->ldgvp->ldg_intrs));
236 	for (i = 0; i < nxgep->ldgvp->ldg_intrs; i++, ldgp++) {
237 		sid.ldg = ldgp->ldg;
238 		sid.niu = B_FALSE;
239 		sid.func = ldgp->func;
240 		sid.vector = ldgp->vector;
241 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
242 			"==> nxge_fzc_intr_sid_set(%d): func %d group %d "
243 			"vector %d",
244 			i, sid.func, sid.ldg, sid.vector));
245 		rs = npi_fzc_sid_set(handle, sid);
246 		if (rs != NPI_SUCCESS) {
247 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
248 				"<== nxge_fzc_intr_sid_set:failed 0x%x",
249 				rs));
250 			return (NXGE_ERROR | rs);
251 		}
252 	}
253 
254 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_sid_set"));
255 
256 	return (NXGE_OK);
257 
258 }
259 
260 /*
261  * Receive DMA registers that are under function zero
262  * management.
263  */
264 /*ARGSUSED*/
265 nxge_status_t
266 nxge_init_fzc_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
267 	p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
268 {
269 	nxge_status_t	status = NXGE_OK;
270 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_init_fzc_rxdma_channel"));
271 
272 	switch (nxgep->niu_type) {
273 	case NEPTUNE:
274 	case NEPTUNE_2:
275 	default:
276 		/* Initialize the RXDMA logical pages */
277 		status = nxge_init_fzc_rxdma_channel_pages(nxgep, channel,
278 			rbr_p);
279 		if (status != NXGE_OK) {
280 			return (status);
281 		}
282 
283 		break;
284 
285 #ifndef	NIU_HV_WORKAROUND
286 	case N2_NIU:
287 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
288 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
289 			"==> nxge_init_fzc_rxdma_channel: N2_NIU - call HV "
290 			"set up logical pages"));
291 		/* Initialize the RXDMA logical pages */
292 		status = nxge_init_hv_fzc_rxdma_channel_pages(nxgep, channel,
293 			rbr_p);
294 		if (status != NXGE_OK) {
295 			return (status);
296 		}
297 #endif
298 		break;
299 #else
300 	case N2_NIU:
301 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
302 			"==> nxge_init_fzc_rxdma_channel: N2_NIU - NEED to "
303 			"set up logical pages"));
304 		/* Initialize the RXDMA logical pages */
305 		status = nxge_init_fzc_rxdma_channel_pages(nxgep, channel,
306 			rbr_p);
307 		if (status != NXGE_OK) {
308 			return (status);
309 		}
310 
311 		break;
312 #endif
313 	}
314 
315 	/* Configure RED parameters */
316 	status = nxge_init_fzc_rxdma_channel_red(nxgep, channel, rcr_p);
317 
318 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_init_fzc_rxdma_channel"));
319 	return (status);
320 }
321 
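/*
 * Program the two RXDMA logical pages (valid bit, value, mask and
 * relocation) and the page handle for the given channel.
 */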
322 /*ARGSUSED*/
323 nxge_status_t
324 nxge_init_fzc_rxdma_channel_pages(p_nxge_t nxgep,
325 		uint16_t channel, p_rx_rbr_ring_t rbrp)
326 {
327 	npi_handle_t		handle;
328 	dma_log_page_t		cfg;
329 	npi_status_t		rs = NPI_SUCCESS;
330 
331 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
332 		"==> nxge_init_fzc_rxdma_channel_pages"));
333 
334 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
335 	/*
336 	 * Initialize logical page 0.
337 	 */
338 	cfg.func_num = nxgep->function_num;
339 	cfg.page_num = 0;
340 	cfg.valid = rbrp->page_valid.bits.ldw.page0;
341 	cfg.value = rbrp->page_value_1.value;
342 	cfg.mask = rbrp->page_mask_1.value;
343 	cfg.reloc = rbrp->page_reloc_1.value;
344 	rs = npi_rxdma_cfg_logical_page(handle, channel,
345 			(p_dma_log_page_t)&cfg);
346 	if (rs != NPI_SUCCESS) {
347 		return (NXGE_ERROR | rs);
348 	}
349 
350 	/*
351 	 * Initialize logical page 1.
352 	 */
353 	cfg.page_num = 1;
354 	cfg.valid = rbrp->page_valid.bits.ldw.page1;
355 	cfg.value = rbrp->page_value_2.value;
356 	cfg.mask = rbrp->page_mask_2.value;
357 	cfg.reloc = rbrp->page_reloc_2.value;
358 
359 	rs = npi_rxdma_cfg_logical_page(handle, channel, &cfg);
360 	if (rs != NPI_SUCCESS) {
361 		return (NXGE_ERROR | rs);
362 	}
363 
364 	/* Initialize the page handle */
365 	rs = npi_rxdma_cfg_logical_page_handle(handle, channel,
366 			rbrp->page_hdl.bits.ldw.handle);
367 
368 	if (rs != NPI_SUCCESS) {
369 		return (NXGE_ERROR | rs);
370 	}
371 
372 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
373 		"<== nxge_init_fzc_rxdma_channel_pages"));
374 
375 	return (NXGE_OK);
376 }
377 
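/*
 * Configure the channel's WRED (random early discard) parameters;
 * the discard thresholds are derived from the completion ring size.
 */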
378 /*ARGSUSED*/
379 nxge_status_t
380 nxge_init_fzc_rxdma_channel_red(p_nxge_t nxgep,
381 	uint16_t channel, p_rx_rcr_ring_t rcr_p)
382 {
383 	npi_handle_t		handle;
384 	rdc_red_para_t		red;
385 	npi_status_t		rs = NPI_SUCCESS;
386 
387 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rxdma_channel_red"));
388 
389 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
390 	red.value = 0;
391 	red.bits.ldw.win = RXDMA_RED_WINDOW_DEFAULT;
392 	red.bits.ldw.thre = (rcr_p->comp_size - RXDMA_RED_LESS_ENTRIES);
393 	red.bits.ldw.win_syn = RXDMA_RED_WINDOW_DEFAULT;
394 	red.bits.ldw.thre_sync = (rcr_p->comp_size - RXDMA_RED_LESS_ENTRIES);
395 
396 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
397 		"==> nxge_init_fzc_rxdma_channel_red(thre_sync %d(%x))",
398 		red.bits.ldw.thre_sync,
399 		red.bits.ldw.thre_sync));
400 
401 	rs = npi_rxdma_cfg_wred_param(handle, channel, &red);
402 	if (rs != NPI_SUCCESS) {
403 		return (NXGE_ERROR | rs);
404 	}
405 
406 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
407 		"<== nxge_init_fzc_rxdma_channel_red"));
408 
409 	return (NXGE_OK);
410 }
411 
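/*
 * Set up the TXDMA logical pages for the channel (through the
 * hypervisor on N2/NIU) and program its transmit DRR weight.
 */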
412 /*ARGSUSED*/
413 nxge_status_t
414 nxge_init_fzc_txdma_channel(p_nxge_t nxgep, uint16_t channel,
415 	p_tx_ring_t tx_ring_p, p_tx_mbox_t mbox_p)
416 {
417 	nxge_status_t	status = NXGE_OK;
418 
419 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
420 		"==> nxge_init_fzc_txdma_channel"));
421 
422 	switch (nxgep->niu_type) {
423 	case NEPTUNE:
424 	case NEPTUNE_2:
425 	default:
426 		/* Initialize the TXDMA logical pages */
427 		(void) nxge_init_fzc_txdma_channel_pages(nxgep, channel,
428 			tx_ring_p);
429 		break;
430 
431 #ifndef	NIU_HV_WORKAROUND
432 	case N2_NIU:
433 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
434 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
435 			"==> nxge_init_fzc_txdma_channel "
436 			"N2_NIU: call HV to set up txdma logical pages"));
437 		status = nxge_init_hv_fzc_txdma_channel_pages(nxgep, channel,
438 			tx_ring_p);
439 		if (status != NXGE_OK) {
440 			return (status);
441 		}
442 #endif
443 		break;
444 #else
445 	case N2_NIU:
446 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
447 			"==> nxge_init_fzc_txdma_channel "
448 			"N2_NIU: NEED to set up txdma logical pages"));
449 		/* Initialize the TXDMA logical pages */
450 		(void) nxge_init_fzc_txdma_channel_pages(nxgep, channel,
451 			tx_ring_p);
452 		break;
453 #endif
454 	}
455 
456 	/*
457 	 * Configure Transmit DRR Weight parameters
458 	 * (It actually programs the TXC max burst register).
459 	 */
460 	(void) nxge_init_fzc_txdma_channel_drr(nxgep, channel, tx_ring_p);
461 
462 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
463 		"<== nxge_init_fzc_txdma_channel"));
464 	return (status);
465 }
466 
467 nxge_status_t
468 nxge_init_fzc_common(p_nxge_t nxgep)
469 {
470 	nxge_status_t	status = NXGE_OK;
471 
472 	(void) nxge_init_fzc_rx_common(nxgep);
473 
474 	return (status);
475 }
476 
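/*
 * Initialize the receive registers that are common to all DMA
 * channels: the RXDMA clock divider, the WRED random seed and
 * the RDC tables.
 */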
477 nxge_status_t
478 nxge_init_fzc_rx_common(p_nxge_t nxgep)
479 {
480 	npi_handle_t	handle;
481 	npi_status_t	rs = NPI_SUCCESS;
482 	nxge_status_t	status = NXGE_OK;
483 	clock_t		lbolt;
484 
485 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rx_common"));
486 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
487 	if (!handle.regp) {
488 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
489 			"==> nxge_init_fzc_rx_common null ptr"));
490 		return (NXGE_ERROR);
491 	}
492 
493 	/*
494 	 * Configure the RXDMA clock divider.
495 	 * This sets the granularity of the RXDMA timers relative
496 	 * to the hardware system clock (300 MHz, i.e. roughly one
497 	 * tick every 3 nanoseconds).
498 	 * With the clock divider counter set to 1000, one timer
499 	 * unit is therefore about 3 microseconds.
500 	 * For example, a 3 microsecond timeout is programmed
501 	 * as 1.
502 	 */
503 	rs = npi_rxdma_cfg_clock_div_set(handle, RXDMA_CK_DIV_DEFAULT);
504 	if (rs != NPI_SUCCESS)
505 		return (NXGE_ERROR | rs);
506 
507 #if defined(__i386)
508 	rs = npi_rxdma_cfg_32bitmode_enable(handle);
509 	if (rs != NPI_SUCCESS)
510 		return (NXGE_ERROR | rs);
511 	rs = npi_txdma_mode32_set(handle, B_TRUE);
512 	if (rs != NPI_SUCCESS)
513 		return (NXGE_ERROR | rs);
514 #endif
515 
516 	/*
517 	 * Enable WRED and program an initial value.
518 	 * Use time to set the initial random number.
519 	 */
520 	(void) drv_getparm(LBOLT, &lbolt);
521 	rs = npi_rxdma_cfg_red_rand_init(handle, (uint16_t)lbolt);
522 	if (rs != NPI_SUCCESS)
523 		return (NXGE_ERROR | rs);
524 
525 	/* Initialize the RDC tables for each group */
526 	status = nxge_init_fzc_rdc_tbl(nxgep);
527 
528 
529 	/* Ethernet Timeout Counter (?) */
530 
531 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
532 		"<== nxge_init_fzc_rx_common:status 0x%08x", status));
533 
534 	return (status);
535 }
536 
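/*
 * Program the RDC table for each RDC group configured for this
 * partition, starting at the partition's first RDC group table ID.
 */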
537 nxge_status_t
538 nxge_init_fzc_rdc_tbl(p_nxge_t nxgep)
539 {
540 	npi_handle_t		handle;
541 	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
542 	p_nxge_hw_pt_cfg_t	p_cfgp;
543 	p_nxge_rdc_grp_t	rdc_grp_p;
544 	uint8_t 		grp_tbl_id;
545 	int			ngrps;
546 	int			i;
547 	npi_status_t		rs = NPI_SUCCESS;
548 	nxge_status_t		status = NXGE_OK;
549 
550 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rdc_tbl"));
551 
552 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
553 	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
554 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
555 
556 	grp_tbl_id = p_cfgp->start_rdc_grpid;
557 	rdc_grp_p = &p_dma_cfgp->rdc_grps[0];
558 	ngrps = p_cfgp->max_rdc_grpids;
559 	for (i = 0; i < ngrps; i++, rdc_grp_p++) {
560 		rs = npi_rxdma_cfg_rdc_table(handle, grp_tbl_id++,
561 			rdc_grp_p->rdc);
562 		if (rs != NPI_SUCCESS) {
563 			status = NXGE_ERROR | rs;
564 			break;
565 		}
566 	}
567 
568 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_init_fzc_rdc_tbl"));
569 	return (status);
570 }
571 
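/*
 * Per-port receive setup: the port scheduler DRR weight (1G ports
 * only), the port's default RDC and the MAC host info table entries.
 */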
572 nxge_status_t
573 nxge_init_fzc_rxdma_port(p_nxge_t nxgep)
574 {
575 	npi_handle_t		handle;
576 	p_nxge_dma_pt_cfg_t	p_all_cfgp;
577 	p_nxge_hw_pt_cfg_t	p_cfgp;
578 	hostinfo_t 		hostinfo;
579 	int			i;
580 	npi_status_t		rs = NPI_SUCCESS;
581 	p_nxge_class_pt_cfg_t 	p_class_cfgp;
582 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rxdma_port"));
583 
584 	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
585 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
586 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
587 	/*
588 	 * Initialize the port scheduler DRR weight.
589 	 * npi_rxdma_cfg_port_ddr_weight();
590 	 */
591 
592 	if (nxgep->niu_type == NEPTUNE) {
593 		if ((nxgep->mac.portmode == PORT_1G_COPPER) ||
594 			(nxgep->mac.portmode == PORT_1G_FIBER)) {
595 			rs = npi_rxdma_cfg_port_ddr_weight(handle,
596 							    nxgep->function_num,
597 							    NXGE_RX_DRR_WT_1G);
598 			if (rs != NPI_SUCCESS) {
599 				return (NXGE_ERROR | rs);
600 			}
601 		}
602 	}
603 
604 	/* Program the default RDC of a port */
605 	rs = npi_rxdma_cfg_default_port_rdc(handle, nxgep->function_num,
606 			p_cfgp->def_rdc);
607 	if (rs != NPI_SUCCESS) {
608 		return (NXGE_ERROR | rs);
609 	}
610 
611 	/*
612 	 * Configure the MAC host info table with RDC tables
613 	 */
614 	hostinfo.value = 0;
615 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
616 	for (i = 0; i < p_cfgp->max_macs; i++) {
617 		hostinfo.bits.w0.rdc_tbl_num = p_cfgp->start_rdc_grpid;
618 		hostinfo.bits.w0.mac_pref = p_cfgp->mac_pref;
619 		if (p_class_cfgp->mac_host_info[i].flag) {
620 			hostinfo.bits.w0.rdc_tbl_num =
621 				p_class_cfgp->mac_host_info[i].rdctbl;
622 			hostinfo.bits.w0.mac_pref =
623 				p_class_cfgp->mac_host_info[i].mpr_npr;
624 		}
625 
626 		rs = npi_mac_hostinfo_entry(handle, OP_SET,
627 				nxgep->function_num, i, &hostinfo);
628 		if (rs != NPI_SUCCESS)
629 			return (NXGE_ERROR | rs);
630 	}
631 
632 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
633 		"<== nxge_init_fzc_rxdma_port rs 0x%08x", rs));
634 
635 	return (NXGE_OK);
636 
637 }
638 
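/*
 * Set the default receive DMA channel for the given port.
 */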
639 nxge_status_t
640 nxge_fzc_dmc_def_port_rdc(p_nxge_t nxgep, uint8_t port, uint16_t rdc)
641 {
642 	npi_status_t rs = NPI_SUCCESS;
643 	rs = npi_rxdma_cfg_default_port_rdc(nxgep->npi_reg_handle,
644 				    port, rdc);
645 	if (rs & NPI_FAILURE)
646 		return (NXGE_ERROR | rs);
647 	return (NXGE_OK);
648 }
649 
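/*
 * Program the two TXDMA logical pages and the page handle for the
 * given channel.  On N2/NIU this normally returns without touching
 * the registers, since the pages are set up through the hypervisor.
 */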
650 nxge_status_t
651 nxge_init_fzc_txdma_channel_pages(p_nxge_t nxgep, uint16_t channel,
652 	p_tx_ring_t tx_ring_p)
653 {
654 	npi_handle_t		handle;
655 	dma_log_page_t		cfg;
656 	npi_status_t		rs = NPI_SUCCESS;
657 
658 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
659 		"==> nxge_init_fzc_txdma_channel_pages"));
660 
661 #ifndef	NIU_HV_WORKAROUND
662 	if (nxgep->niu_type == N2_NIU) {
663 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
664 			"<== nxge_init_fzc_txdma_channel_pages: "
665 			"N2_NIU: no need to set txdma logical pages"));
666 		return (NXGE_OK);
667 	}
668 #else
669 	if (nxgep->niu_type == N2_NIU) {
670 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
671 			"<== nxge_init_fzc_txdma_channel_pages: "
672 			"N2_NIU: NEED to set txdma logical pages"));
673 	}
674 #endif
675 
676 	/*
677 	 * Initialize logical page 0.
678 	 */
679 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
680 	cfg.func_num = nxgep->function_num;
681 	cfg.page_num = 0;
682 	cfg.valid = tx_ring_p->page_valid.bits.ldw.page0;
683 	cfg.value = tx_ring_p->page_value_1.value;
684 	cfg.mask = tx_ring_p->page_mask_1.value;
685 	cfg.reloc = tx_ring_p->page_reloc_1.value;
686 
687 	rs = npi_txdma_log_page_set(handle, channel,
688 		(p_dma_log_page_t)&cfg);
689 	if (rs != NPI_SUCCESS) {
690 		return (NXGE_ERROR | rs);
691 	}
692 
693 	/*
694 	 * Initialize logical page 1.
695 	 */
696 	cfg.page_num = 1;
697 	cfg.valid = tx_ring_p->page_valid.bits.ldw.page1;
698 	cfg.value = tx_ring_p->page_value_2.value;
699 	cfg.mask = tx_ring_p->page_mask_2.value;
700 	cfg.reloc = tx_ring_p->page_reloc_2.value;
701 
702 	rs = npi_txdma_log_page_set(handle, channel, &cfg);
703 	if (rs != NPI_SUCCESS) {
704 		return (NXGE_ERROR | rs);
705 	}
706 
707 	/* Initialize the page handle */
708 	rs = npi_txdma_log_page_handle_set(handle, channel,
709 			&tx_ring_p->page_hdl);
710 
711 	if (rs == NPI_SUCCESS) {
712 		return (NXGE_OK);
713 	} else {
714 		return (NXGE_ERROR | rs);
715 	}
716 }
717 
718 
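/*
 * Program the transmit DRR weight for the channel by setting the
 * TXC DMA maximum burst register.
 */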
719 nxge_status_t
720 nxge_init_fzc_txdma_channel_drr(p_nxge_t nxgep, uint16_t channel,
721 	p_tx_ring_t tx_ring_p)
722 {
723 	npi_status_t	rs = NPI_SUCCESS;
724 	npi_handle_t	handle;
725 
726 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
727 	rs = npi_txc_dma_max_burst_set(handle, channel,
728 			tx_ring_p->max_burst.value);
729 	if (rs == NPI_SUCCESS) {
730 		return (NXGE_OK);
731 	} else {
732 		return (NXGE_ERROR | rs);
733 	}
734 }
735 
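/*
 * Program the FZC system error interrupt mask.
 */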
736 nxge_status_t
737 nxge_fzc_sys_err_mask_set(p_nxge_t nxgep, uint64_t mask)
738 {
739 	npi_status_t	rs = NPI_SUCCESS;
740 	npi_handle_t	handle;
741 
742 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
743 	rs = npi_fzc_sys_err_mask_set(handle, mask);
744 	if (rs == NPI_SUCCESS) {
745 		return (NXGE_OK);
746 	} else {
747 		return (NXGE_ERROR | rs);
748 	}
749 }
750 
751 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
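/*
 * On sun4v (N2/NIU) the TXDMA logical pages are configured through
 * the niu hypervisor API rather than by direct register access.
 * Program the data buffers as page 0 and the control buffers as
 * page 1.
 */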
752 nxge_status_t
753 nxge_init_hv_fzc_txdma_channel_pages(p_nxge_t nxgep, uint16_t channel,
754 	p_tx_ring_t tx_ring_p)
755 {
756 	int			err;
757 	uint64_t		hverr;
758 #ifdef	DEBUG
759 	uint64_t		ra, size;
760 #endif
761 
762 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
763 		"==> nxge_init_hv_fzc_txdma_channel_pages"));
764 
765 	if (tx_ring_p->hv_set) {
766 		return (NXGE_OK);
767 	}
768 
769 	/*
770 	 * Initialize logical page 0 for data buffers.
771 	 */
772 	hverr = hv_niu_tx_logical_page_conf((uint64_t)channel,
773 			(uint64_t)0,
774 			tx_ring_p->hv_tx_buf_base_ioaddr_pp,
775 			tx_ring_p->hv_tx_buf_ioaddr_size);
776 
777 	err = (nxge_status_t)nxge_herr2kerr(hverr);
778 	if (err != 0) {
779 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
780 			"<== nxge_init_hv_fzc_txdma_channel_pages: channel %d "
781 			"error status 0x%x "
782 			"(page 0 data buf) hverr 0x%llx "
783 			"ioaddr_pp $%p "
784 			"size 0x%llx ",
785 			channel,
786 			err,
787 			hverr,
788 			tx_ring_p->hv_tx_buf_base_ioaddr_pp,
789 			tx_ring_p->hv_tx_buf_ioaddr_size));
790 		return (NXGE_ERROR | err);
791 	}
792 
793 #ifdef	DEBUG
794 	ra = size = 0;
795 	hverr = hv_niu_tx_logical_page_info((uint64_t)channel,
796 			(uint64_t)0,
797 			&ra,
798 			&size);
799 
800 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
801 		"==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
802 		"ok status 0x%x "
803 		"(page 0 data buf) hverr 0x%llx "
804 		"set ioaddr_pp $%p "
805 		"set size 0x%llx "
806 		"get ra ioaddr_pp $%p "
807 		"get size 0x%llx ",
808 		channel,
809 		err,
810 		hverr,
811 		tx_ring_p->hv_tx_buf_base_ioaddr_pp,
812 		tx_ring_p->hv_tx_buf_ioaddr_size,
813 		ra,
814 		size));
815 #endif
816 
817 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
818 		"==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
819 		"(page 0 data buf) hverr 0x%llx "
820 		"ioaddr_pp $%p "
821 		"size 0x%llx ",
822 		channel,
823 		hverr,
824 		tx_ring_p->hv_tx_buf_base_ioaddr_pp,
825 		tx_ring_p->hv_tx_buf_ioaddr_size));
826 
827 	/*
828 	 * Initialize logical page 1 for control buffers.
829 	 */
830 	hverr = hv_niu_tx_logical_page_conf((uint64_t)channel,
831 			(uint64_t)1,
832 			tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
833 			tx_ring_p->hv_tx_cntl_ioaddr_size);
834 
835 	err = (nxge_status_t)nxge_herr2kerr(hverr);
836 
837 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
838 		"==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
839 		"ok status 0x%x "
840 		"(page 1 cntl buf) hverr 0x%llx "
841 		"ioaddr_pp $%p "
842 		"size 0x%llx ",
843 		channel,
844 		err,
845 		hverr,
846 		tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
847 		tx_ring_p->hv_tx_cntl_ioaddr_size));
848 
849 	if (err != 0) {
850 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
851 			"<== nxge_init_hv_fzc_txdma_channel_pages: channel %d "
852 			"error status 0x%x "
853 			"(page 1 cntl buf) hverr 0x%llx "
854 			"ioaddr_pp $%p "
855 			"size 0x%llx ",
856 			channel,
857 			err,
858 			hverr,
859 			tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
860 			tx_ring_p->hv_tx_cntl_ioaddr_size));
861 		return (NXGE_ERROR | err);
862 	}
863 
864 #ifdef	DEBUG
865 	ra = size = 0;
866 	hverr = hv_niu_tx_logical_page_info((uint64_t)channel,
867 			(uint64_t)1,
868 			&ra,
869 			&size);
870 
871 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
872 		"==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
873 		"(page 1 cntl buf) hverr 0x%llx "
874 		"set ioaddr_pp $%p "
875 		"set size 0x%llx "
876 		"get ra ioaddr_pp $%p "
877 		"get size 0x%llx ",
878 		channel,
879 		hverr,
880 		tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
881 		tx_ring_p->hv_tx_cntl_ioaddr_size,
882 		ra,
883 		size));
884 #endif
885 
886 	tx_ring_p->hv_set = B_TRUE;
887 
888 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
889 		"<== nxge_init_hv_fzc_txdma_channel_pages"));
890 
891 	return (NXGE_OK);
892 }
893 
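/*
 * sun4v (N2/NIU) version of the RXDMA logical page setup: program
 * the data buffers (page 0) and control buffers (page 1) through
 * the hypervisor.
 */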
894 /*ARGSUSED*/
895 nxge_status_t
896 nxge_init_hv_fzc_rxdma_channel_pages(p_nxge_t nxgep,
897 		uint16_t channel, p_rx_rbr_ring_t rbrp)
898 {
899 	int			err;
900 	uint64_t		hverr;
901 #ifdef	DEBUG
902 	uint64_t		ra, size;
903 #endif
904 
905 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
906 		"==> nxge_init_hv_fzc_rxdma_channel_pages"));
907 
908 	if (rbrp->hv_set) {
909 		return (NXGE_OK);
910 	}
911 
912 	/* Initialize data buffers for page 0 */
913 	hverr = hv_niu_rx_logical_page_conf((uint64_t)channel,
914 			(uint64_t)0,
915 			rbrp->hv_rx_buf_base_ioaddr_pp,
916 			rbrp->hv_rx_buf_ioaddr_size);
917 	err = (nxge_status_t)nxge_herr2kerr(hverr);
918 	if (err != 0) {
919 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
920 			"<== nxge_init_hv_fzc_rxdma_channel_pages: channel %d "
921 			"error status 0x%x "
922 			"(page 0 data buf) hverr 0x%llx "
923 			"ioaddr_pp $%p "
924 			"size 0x%llx ",
925 			channel,
926 			err,
927 			hverr,
928 			rbrp->hv_rx_buf_base_ioaddr_pp,
929 			rbrp->hv_rx_buf_ioaddr_size));
930 
931 		return (NXGE_ERROR | err);
932 	}
933 
934 #ifdef	DEBUG
935 	ra = size = 0;
936 	(void) hv_niu_rx_logical_page_info((uint64_t)channel,
937 			(uint64_t)0,
938 			&ra,
939 			&size);
940 
941 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
942 		"==> nxge_init_hv_fzc_rxdma_channel_pages: channel %d "
943 		"ok status 0x%x "
944 		"(page 0 data buf) hverr 0x%llx "
945 		"set databuf ioaddr_pp $%p "
946 		"set databuf size 0x%llx "
947 		"get databuf ra ioaddr_pp %p "
948 		"get databuf size 0x%llx",
949 		channel,
950 		err,
951 		hverr,
952 		rbrp->hv_rx_buf_base_ioaddr_pp,
953 		rbrp->hv_rx_buf_ioaddr_size,
954 		ra,
955 		size));
956 #endif
957 
958 	/* Initialize control buffers for logical page 1.  */
959 	hverr = hv_niu_rx_logical_page_conf((uint64_t)channel,
960 			(uint64_t)1,
961 			rbrp->hv_rx_cntl_base_ioaddr_pp,
962 			rbrp->hv_rx_cntl_ioaddr_size);
963 
964 	err = (nxge_status_t)nxge_herr2kerr(hverr);
965 	if (err != 0) {
966 			"<== nxge_init_hv_fzc_rxdma_channel_pages: channel %d "
967 			"<== nxge_init_hv_fzc_rxdma_channel_pages: channel %d"
968 			"error status 0x%x "
969 			"(page 1 cntl buf) hverr 0x%llx "
970 			"ioaddr_pp $%p "
971 			"size 0x%llx ",
972 			channel,
973 			err,
974 			hverr,
975 			rbrp->hv_rx_cntl_base_ioaddr_pp,
976 			rbrp->hv_rx_cntl_ioaddr_size));
977 
978 		return (NXGE_ERROR | err);
979 	}
980 
981 #ifdef	DEBUG
982 	ra = size = 0;
983 	(void) hv_niu_rx_logical_page_info((uint64_t)channel,
984 			(uint64_t)1,
985 			&ra,
986 			&size);
987 
988 
989 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
990 		"==> nxge_init_hv_fzc_rxdma_channel_pages: channel %d "
991 		"ok status 0x%x "
992 		"(page 1 cntl buf) hverr 0x%llx "
993 		"set cntl ioaddr_pp $%p "
994 		"set cntl size 0x%llx "
995 		"get cntl ioaddr_pp $%p "
996 		"get cntl size 0x%llx ",
997 		channel,
998 		err,
999 		hverr,
1000 		rbrp->hv_rx_cntl_base_ioaddr_pp,
1001 		rbrp->hv_rx_cntl_ioaddr_size,
1002 		ra,
1003 		size));
1004 #endif
1005 
1006 	rbrp->hv_set = B_TRUE;
1007 
1008 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1009 		"<== nxge_init_hv_fzc_rxdma_channel_pages"));
1010 
1011 	return (NXGE_OK);
1012 }
1013 
1014 /*
1015  * Map a hypervisor error code to an errno value.  Only
1016  * H_ENORADDR, H_EBADALIGN and H_EINVAL are meaningful for the
1017  * niu driver; any other error code is also mapped to EINVAL.
1018  */
1019 static int
1020 nxge_herr2kerr(uint64_t hv_errcode)
1021 {
1022 	int	s_errcode;
1023 
1024 	switch (hv_errcode) {
1025 	case H_ENORADDR:
1026 	case H_EBADALIGN:
1027 		s_errcode = EFAULT;
1028 		break;
1029 	case H_EOK:
1030 		s_errcode = 0;
1031 		break;
1032 	default:
1033 		s_errcode = EINVAL;
1034 		break;
1035 	}
1036 	return (s_errcode);
1037 }
1038 
1039 #endif	/* sun4v and NIU_LP_WORKAROUND */
1040