xref: /illumos-gate/usr/src/uts/common/io/nxge/nxge_fzc.c (revision 2e59129a8dc96d4082395c338ad696e29471d4e0)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include	<nxge_impl.h>
29 #include	<npi_mac.h>
30 #include	<npi_rxdma.h>
31 
32 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
33 static int	nxge_herr2kerr(uint64_t);
34 #endif
35 
/*
 * The following interfaces are controlled by the
 * function control registers. Some global registers
 * are to be initialized by only one of the 2/4 functions.
 * Use the test and set register.
 */
42 /*ARGSUSED*/
43 nxge_status_t
44 nxge_test_and_set(p_nxge_t nxgep, uint8_t tas)
45 {
46 	npi_handle_t		handle;
47 	npi_status_t		rs = NPI_SUCCESS;
48 
49 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
50 	if ((rs = npi_dev_func_sr_sr_get_set_clear(handle, tas))
51 			!= NPI_SUCCESS) {
52 		return (NXGE_ERROR | rs);
53 	}
54 
55 	return (NXGE_OK);
56 }
57 
58 nxge_status_t
59 nxge_set_fzc_multi_part_ctl(p_nxge_t nxgep, boolean_t mpc)
60 {
61 	npi_handle_t		handle;
62 	npi_status_t		rs = NPI_SUCCESS;
63 
64 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_set_fzc_multi_part_ctl"));
65 
66 	/*
67 	 * In multi-partitioning, the partition manager
68 	 * who owns function zero should set this multi-partition
69 	 * control bit.
70 	 */
71 	if (nxgep->use_partition && nxgep->function_num) {
72 		return (NXGE_ERROR);
73 	}
74 
75 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
76 	if ((rs = npi_fzc_mpc_set(handle, mpc)) != NPI_SUCCESS) {
77 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
78 			"<== nxge_set_fzc_multi_part_ctl"));
79 		return (NXGE_ERROR | rs);
80 	}
81 
82 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_set_fzc_multi_part_ctl"));
83 
84 	return (NXGE_OK);
85 }
86 
87 nxge_status_t
88 nxge_get_fzc_multi_part_ctl(p_nxge_t nxgep, boolean_t *mpc_p)
89 {
90 	npi_handle_t		handle;
91 	npi_status_t		rs = NPI_SUCCESS;
92 
93 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_get_fzc_multi_part_ctl"));
94 
95 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
96 	if ((rs = npi_fzc_mpc_get(handle, mpc_p)) != NPI_SUCCESS) {
97 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
98 			"<== nxge_set_fzc_multi_part_ctl"));
99 		return (NXGE_ERROR | rs);
100 	}
101 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_get_fzc_multi_part_ctl"));
102 
103 	return (NXGE_OK);
104 }
105 
106 /*
107  * System interrupt registers that are under function zero
108  * management.
109  */
110 nxge_status_t
111 nxge_fzc_intr_init(p_nxge_t nxgep)
112 {
113 	nxge_status_t	status = NXGE_OK;
114 
115 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_init"));
116 
117 	/* Configure the initial timer resolution */
118 	if ((status = nxge_fzc_intr_tmres_set(nxgep)) != NXGE_OK) {
119 		return (status);
120 	}
121 
122 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
123 		/*
124 		 * Set up the logical device group's logical devices that
125 		 * the group owns.
126 		 */
127 		if ((status = nxge_fzc_intr_ldg_num_set(nxgep)) != NXGE_OK)
128 			goto fzc_intr_init_exit;
129 
130 		/* Configure the system interrupt data */
131 		if ((status = nxge_fzc_intr_sid_set(nxgep)) != NXGE_OK)
132 			goto fzc_intr_init_exit;
133 	}
134 
135 fzc_intr_init_exit:
136 
137 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_init"));
138 
139 	return (status);
140 }
141 
142 nxge_status_t
143 nxge_fzc_intr_ldg_num_set(p_nxge_t nxgep)
144 {
145 	p_nxge_ldg_t	ldgp;
146 	p_nxge_ldv_t	ldvp;
147 	npi_handle_t	handle;
148 	int		i, j;
149 	npi_status_t	rs = NPI_SUCCESS;
150 
151 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_ldg_num_set"));
152 
153 	if (nxgep->ldgvp == NULL) {
154 		return (NXGE_ERROR);
155 	}
156 
157 	ldgp = nxgep->ldgvp->ldgp;
158 	ldvp = nxgep->ldgvp->ldvp;
159 	if (ldgp == NULL || ldvp == NULL) {
160 		return (NXGE_ERROR);
161 	}
162 
163 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
164 
165 	for (i = 0; i < nxgep->ldgvp->ldg_intrs; i++, ldgp++) {
166 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
167 			"==> nxge_fzc_intr_ldg_num_set "
168 			"<== nxge_f(Neptune): # ldv %d "
169 			"in group %d", ldgp->nldvs, ldgp->ldg));
170 
171 		for (j = 0; j < ldgp->nldvs; j++, ldvp++) {
172 			rs = npi_fzc_ldg_num_set(handle, ldvp->ldv,
173 				ldvp->ldg_assigned);
174 			if (rs != NPI_SUCCESS) {
175 				NXGE_DEBUG_MSG((nxgep, INT_CTL,
176 					"<== nxge_fzc_intr_ldg_num_set failed "
177 					" rs 0x%x ldv %d ldg %d",
178 					rs, ldvp->ldv, ldvp->ldg_assigned));
179 				return (NXGE_ERROR | rs);
180 			}
181 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
182 				"<== nxge_fzc_intr_ldg_num_set OK "
183 				" ldv %d ldg %d",
184 				ldvp->ldv, ldvp->ldg_assigned));
185 		}
186 	}
187 
188 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_ldg_num_set"));
189 
190 	return (NXGE_OK);
191 }
192 
193 nxge_status_t
194 nxge_fzc_intr_tmres_set(p_nxge_t nxgep)
195 {
196 	npi_handle_t	handle;
197 	npi_status_t	rs = NPI_SUCCESS;
198 
199 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_tmrese_set"));
200 	if (nxgep->ldgvp == NULL) {
201 		return (NXGE_ERROR);
202 	}
203 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
204 	if ((rs = npi_fzc_ldg_timer_res_set(handle, nxgep->ldgvp->tmres))) {
205 		return (NXGE_ERROR | rs);
206 	}
207 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_tmrese_set"));
208 
209 	return (NXGE_OK);
210 }
211 
212 nxge_status_t
213 nxge_fzc_intr_sid_set(p_nxge_t nxgep)
214 {
215 	npi_handle_t	handle;
216 	p_nxge_ldg_t	ldgp;
217 	fzc_sid_t	sid;
218 	int		i;
219 	npi_status_t	rs = NPI_SUCCESS;
220 
221 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_sid_set"));
222 	if (nxgep->ldgvp == NULL) {
223 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
224 			"<== nxge_fzc_intr_sid_set: no ldg"));
225 		return (NXGE_ERROR);
226 	}
227 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
228 	ldgp = nxgep->ldgvp->ldgp;
229 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
230 		"==> nxge_fzc_intr_sid_set: #int %d", nxgep->ldgvp->ldg_intrs));
231 	for (i = 0; i < nxgep->ldgvp->ldg_intrs; i++, ldgp++) {
232 		sid.ldg = ldgp->ldg;
233 		sid.niu = B_FALSE;
234 		sid.func = ldgp->func;
235 		sid.vector = ldgp->vector;
236 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
237 			"==> nxge_fzc_intr_sid_set(%d): func %d group %d "
238 			"vector %d",
239 			i, sid.func, sid.ldg, sid.vector));
240 		rs = npi_fzc_sid_set(handle, sid);
241 		if (rs != NPI_SUCCESS) {
242 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
243 				"<== nxge_fzc_intr_sid_set:failed 0x%x",
244 				rs));
245 			return (NXGE_ERROR | rs);
246 		}
247 	}
248 
249 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_sid_set"));
250 
251 	return (NXGE_OK);
252 
253 }
254 
255 /*
256  * Receive DMA registers that are under function zero
257  * management.
258  */
/*
 * Set up the function-zero-managed state of one receive DMA channel:
 * its logical (DMA) pages and its WRED (RED) parameters.
 * The path taken for the logical pages depends on the device type and
 * on the NIU_HV_WORKAROUND / NIU_LP_WORKAROUND build options.
 * mbox_p is currently unused (hence ARGSUSED).
 */
/*ARGSUSED*/
nxge_status_t
nxge_init_fzc_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
	p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
{
	nxge_status_t	status = NXGE_OK;
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_init_fzc_rxdma_channel"));

	if (nxgep->niu_type == N2_NIU) {
#ifndef	NIU_HV_WORKAROUND
#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
		/*
		 * N2/NIU on sun4v: the logical pages must be programmed
		 * through the hypervisor rather than by direct register
		 * writes.
		 */
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_init_fzc_rxdma_channel: N2_NIU - call HV "
		    "set up logical pages"));
		/* Initialize the RXDMA logical pages */
		status = nxge_init_hv_fzc_rxdma_channel_pages(nxgep, channel,
			rbr_p);
		if (status != NXGE_OK) {
			return (status);
		}
#endif
		/* Without the LP workaround, nothing to do for N2/NIU. */
		status = NXGE_OK;
#else
		/* Workaround build: program the pages directly. */
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_init_fzc_rxdma_channel: N2_NIU - NEED to "
		    "set up logical pages"));
		/* Initialize the RXDMA logical pages */
		status = nxge_init_fzc_rxdma_channel_pages(nxgep, channel,
		    rbr_p);
		if (status != NXGE_OK) {
			return (status);
		}
#endif
	} else if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		/* Neptune: program the pages directly. */
		/* Initialize the RXDMA logical pages */
		status = nxge_init_fzc_rxdma_channel_pages(nxgep,
		    channel, rbr_p);
		if (status != NXGE_OK) {
			return (status);
		}
	} else {
		/* Unknown device type: refuse to touch the hardware. */
		return (NXGE_ERROR);
	}

	/* Configure RED parameters */
	status = nxge_init_fzc_rxdma_channel_red(nxgep, channel, rcr_p);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_init_fzc_rxdma_channel"));
	return (status);
}
309 
310 /*ARGSUSED*/
311 nxge_status_t
312 nxge_init_fzc_rxdma_channel_pages(p_nxge_t nxgep,
313 		uint16_t channel, p_rx_rbr_ring_t rbrp)
314 {
315 	npi_handle_t		handle;
316 	dma_log_page_t		cfg;
317 	npi_status_t		rs = NPI_SUCCESS;
318 
319 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
320 		"==> nxge_init_fzc_rxdma_channel_pages"));
321 
322 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
323 	/*
324 	 * Initialize logical page 1.
325 	 */
326 	cfg.func_num = nxgep->function_num;
327 	cfg.page_num = 0;
328 	cfg.valid = rbrp->page_valid.bits.ldw.page0;
329 	cfg.value = rbrp->page_value_1.value;
330 	cfg.mask = rbrp->page_mask_1.value;
331 	cfg.reloc = rbrp->page_reloc_1.value;
332 	rs = npi_rxdma_cfg_logical_page(handle, channel,
333 			(p_dma_log_page_t)&cfg);
334 	if (rs != NPI_SUCCESS) {
335 		return (NXGE_ERROR | rs);
336 	}
337 
338 	/*
339 	 * Initialize logical page 2.
340 	 */
341 	cfg.page_num = 1;
342 	cfg.valid = rbrp->page_valid.bits.ldw.page1;
343 	cfg.value = rbrp->page_value_2.value;
344 	cfg.mask = rbrp->page_mask_2.value;
345 	cfg.reloc = rbrp->page_reloc_2.value;
346 
347 	rs = npi_rxdma_cfg_logical_page(handle, channel, &cfg);
348 	if (rs != NPI_SUCCESS) {
349 		return (NXGE_ERROR | rs);
350 	}
351 
352 	/* Initialize the page handle */
353 	rs = npi_rxdma_cfg_logical_page_handle(handle, channel,
354 			rbrp->page_hdl.bits.ldw.handle);
355 
356 	if (rs != NPI_SUCCESS) {
357 		return (NXGE_ERROR | rs);
358 	}
359 
360 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
361 		"<== nxge_init_fzc_rxdma_channel_pages"));
362 
363 	return (NXGE_OK);
364 }
365 
366 /*ARGSUSED*/
367 nxge_status_t
368 nxge_init_fzc_rxdma_channel_red(p_nxge_t nxgep,
369 	uint16_t channel, p_rx_rcr_ring_t rcr_p)
370 {
371 	npi_handle_t		handle;
372 	rdc_red_para_t		red;
373 	npi_status_t		rs = NPI_SUCCESS;
374 
375 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rxdma_channel_red"));
376 
377 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
378 	red.value = 0;
379 	red.bits.ldw.win = RXDMA_RED_WINDOW_DEFAULT;
380 	red.bits.ldw.thre = (rcr_p->comp_size - RXDMA_RED_LESS_ENTRIES);
381 	red.bits.ldw.win_syn = RXDMA_RED_WINDOW_DEFAULT;
382 	red.bits.ldw.thre_sync = (rcr_p->comp_size - RXDMA_RED_LESS_ENTRIES);
383 
384 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
385 		"==> nxge_init_fzc_rxdma_channel_red(thre_sync %d(%x))",
386 		red.bits.ldw.thre_sync,
387 		red.bits.ldw.thre_sync));
388 
389 	rs = npi_rxdma_cfg_wred_param(handle, channel, &red);
390 	if (rs != NPI_SUCCESS) {
391 		return (NXGE_ERROR | rs);
392 	}
393 
394 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
395 		"<== nxge_init_fzc_rxdma_channel_red"));
396 
397 	return (NXGE_OK);
398 }
399 
/*
 * Set up the function-zero-managed state of one transmit DMA channel:
 * its logical (DMA) pages and its DRR weight (TXC max burst).
 * The path taken for the logical pages depends on the device type and
 * on the NIU_HV_WORKAROUND / NIU_LP_WORKAROUND build options.
 * mbox_p is currently unused (hence ARGSUSED).
 */
/*ARGSUSED*/
nxge_status_t
nxge_init_fzc_txdma_channel(p_nxge_t nxgep, uint16_t channel,
	p_tx_ring_t tx_ring_p, p_tx_mbox_t mbox_p)
{
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
		"==> nxge_init_fzc_txdma_channel"));

	if (nxgep->niu_type == N2_NIU) {
#ifndef	NIU_HV_WORKAROUND
#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
		/*
		 * N2/NIU on sun4v: logical pages are programmed through
		 * the hypervisor, not by direct register writes.
		 */
		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
		    "==> nxge_init_fzc_txdma_channel "
		    "N2_NIU: call HV to set up txdma logical pages"));
		status = nxge_init_hv_fzc_txdma_channel_pages(nxgep, channel,
		    tx_ring_p);
		if (status != NXGE_OK) {
			return (status);
		}
#endif
		/* Without the LP workaround, nothing to do for N2/NIU. */
		status = NXGE_OK;
#else
		/*
		 * Workaround build: program the pages directly.
		 * Note the return status is deliberately ignored here.
		 */
		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
		    "==> nxge_init_fzc_txdma_channel "
		    "N2_NIU: NEED to set up txdma logical pages"));
		/* Initialize the TXDMA logical pages */
		(void) nxge_init_fzc_txdma_channel_pages(nxgep, channel,
		    tx_ring_p);
#endif
	} else if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		/* Neptune: program the pages directly (status ignored). */
		/* Initialize the TXDMA logical pages */
		(void) nxge_init_fzc_txdma_channel_pages(nxgep,
		    channel, tx_ring_p);
	} else {
		/* Unknown device type: refuse to touch the hardware. */
		return (NXGE_ERROR);
	}

	/*
	 * Configure Transmit DRR Weight parameters
	 * (It actually programs the TXC max burst register).
	 */
	(void) nxge_init_fzc_txdma_channel_drr(nxgep, channel, tx_ring_p);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
		"<== nxge_init_fzc_txdma_channel"));
	return (status);
}
449 
450 nxge_status_t
451 nxge_init_fzc_common(p_nxge_t nxgep)
452 {
453 	nxge_status_t	status = NXGE_OK;
454 
455 	(void) nxge_init_fzc_rx_common(nxgep);
456 
457 	return (status);
458 }
459 
460 nxge_status_t
461 nxge_init_fzc_rx_common(p_nxge_t nxgep)
462 {
463 	npi_handle_t	handle;
464 	npi_status_t	rs = NPI_SUCCESS;
465 	nxge_status_t	status = NXGE_OK;
466 	clock_t		lbolt;
467 
468 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rx_common"));
469 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
470 	if (!handle.regp) {
471 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
472 			"==> nxge_init_fzc_rx_common null ptr"));
473 		return (NXGE_ERROR);
474 	}
475 
476 	/*
477 	 * Configure the rxdma clock divider
478 	 * This is the granularity counter based on
479 	 * the hardware system clock (i.e. 300 Mhz) and
480 	 * it is running around 3 nanoseconds.
481 	 * So, set the clock divider counter to 1000 to get
482 	 * microsecond granularity.
483 	 * For example, for a 3 microsecond timeout, the timeout
484 	 * will be set to 1.
485 	 */
486 	rs = npi_rxdma_cfg_clock_div_set(handle, RXDMA_CK_DIV_DEFAULT);
487 	if (rs != NPI_SUCCESS)
488 		return (NXGE_ERROR | rs);
489 
490 #if defined(__i386)
491 	rs = npi_rxdma_cfg_32bitmode_enable(handle);
492 	if (rs != NPI_SUCCESS)
493 		return (NXGE_ERROR | rs);
494 	rs = npi_txdma_mode32_set(handle, B_TRUE);
495 	if (rs != NPI_SUCCESS)
496 		return (NXGE_ERROR | rs);
497 #endif
498 
499 	/*
500 	 * Enable WRED and program an initial value.
501 	 * Use time to set the initial random number.
502 	 */
503 	(void) drv_getparm(LBOLT, &lbolt);
504 	rs = npi_rxdma_cfg_red_rand_init(handle, (uint16_t)lbolt);
505 	if (rs != NPI_SUCCESS)
506 		return (NXGE_ERROR | rs);
507 
508 	/* Initialize the RDC tables for each group */
509 	status = nxge_init_fzc_rdc_tbl(nxgep);
510 
511 
512 	/* Ethernet Timeout Counter (?) */
513 
514 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
515 		"<== nxge_init_fzc_rx_common:status 0x%08x", status));
516 
517 	return (status);
518 }
519 
520 nxge_status_t
521 nxge_init_fzc_rdc_tbl(p_nxge_t nxgep)
522 {
523 	npi_handle_t		handle;
524 	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
525 	p_nxge_hw_pt_cfg_t	p_cfgp;
526 	p_nxge_rdc_grp_t	rdc_grp_p;
527 	uint8_t 		grp_tbl_id;
528 	int			ngrps;
529 	int			i;
530 	npi_status_t		rs = NPI_SUCCESS;
531 	nxge_status_t		status = NXGE_OK;
532 
533 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rdc_tbl"));
534 
535 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
536 	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
537 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
538 
539 	grp_tbl_id = p_cfgp->start_rdc_grpid;
540 	rdc_grp_p = &p_dma_cfgp->rdc_grps[0];
541 	ngrps = p_cfgp->max_rdc_grpids;
542 	for (i = 0; i < ngrps; i++, rdc_grp_p++) {
543 		rs = npi_rxdma_cfg_rdc_table(handle, grp_tbl_id++,
544 			rdc_grp_p->rdc);
545 		if (rs != NPI_SUCCESS) {
546 			status = NXGE_ERROR | rs;
547 			break;
548 		}
549 	}
550 
551 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_init_fzc_rdc_tbl"));
552 	return (status);
553 }
554 
555 nxge_status_t
556 nxge_init_fzc_rxdma_port(p_nxge_t nxgep)
557 {
558 	npi_handle_t		handle;
559 	p_nxge_dma_pt_cfg_t	p_all_cfgp;
560 	p_nxge_hw_pt_cfg_t	p_cfgp;
561 	hostinfo_t 		hostinfo;
562 	int			i;
563 	npi_status_t		rs = NPI_SUCCESS;
564 	p_nxge_class_pt_cfg_t 	p_class_cfgp;
565 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rxdma_port"));
566 
567 	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
568 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
569 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
570 	/*
571 	 * Initialize the port scheduler DRR weight.
572 	 * npi_rxdma_cfg_port_ddr_weight();
573 	 */
574 
575 	if ((nxgep->mac.portmode == PORT_1G_COPPER) ||
576 	    (nxgep->mac.portmode == PORT_1G_FIBER) ||
577 	    (nxgep->mac.portmode == PORT_1G_SERDES)) {
578 		rs = npi_rxdma_cfg_port_ddr_weight(handle,
579 		    nxgep->function_num, NXGE_RX_DRR_WT_1G);
580 		if (rs != NPI_SUCCESS) {
581 			return (NXGE_ERROR | rs);
582 		}
583 	}
584 
585 	/* Program the default RDC of a port */
586 	rs = npi_rxdma_cfg_default_port_rdc(handle, nxgep->function_num,
587 			p_cfgp->def_rdc);
588 	if (rs != NPI_SUCCESS) {
589 		return (NXGE_ERROR | rs);
590 	}
591 
592 	/*
593 	 * Configure the MAC host info table with RDC tables
594 	 */
595 	hostinfo.value = 0;
596 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
597 	for (i = 0; i < p_cfgp->max_macs; i++) {
598 		hostinfo.bits.w0.rdc_tbl_num = p_cfgp->start_rdc_grpid;
599 		hostinfo.bits.w0.mac_pref = p_cfgp->mac_pref;
600 		if (p_class_cfgp->mac_host_info[i].flag) {
601 			hostinfo.bits.w0.rdc_tbl_num =
602 				p_class_cfgp->mac_host_info[i].rdctbl;
603 			hostinfo.bits.w0.mac_pref =
604 				p_class_cfgp->mac_host_info[i].mpr_npr;
605 		}
606 
607 		rs = npi_mac_hostinfo_entry(handle, OP_SET,
608 				nxgep->function_num, i, &hostinfo);
609 		if (rs != NPI_SUCCESS)
610 			return (NXGE_ERROR | rs);
611 	}
612 
613 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
614 		"<== nxge_init_fzc_rxdma_port rs 0x%08x", rs));
615 
616 	return (NXGE_OK);
617 
618 }
619 
620 nxge_status_t
621 nxge_fzc_dmc_def_port_rdc(p_nxge_t nxgep, uint8_t port, uint16_t rdc)
622 {
623 	npi_status_t rs = NPI_SUCCESS;
624 	rs = npi_rxdma_cfg_default_port_rdc(nxgep->npi_reg_handle,
625 				    port, rdc);
626 	if (rs & NPI_FAILURE)
627 		return (NXGE_ERROR | rs);
628 	return (NXGE_OK);
629 }
630 
/*
 * Program the two logical (DMA) pages and the page handle of one
 * transmit DMA channel from the register images saved in the tx ring.
 * On N2/NIU (without NIU_HV_WORKAROUND) the pages are owned by the
 * hypervisor, so this function is a no-op there.
 */
nxge_status_t
nxge_init_fzc_txdma_channel_pages(p_nxge_t nxgep, uint16_t channel,
	p_tx_ring_t tx_ring_p)
{
	npi_handle_t		handle;
	dma_log_page_t		cfg;
	npi_status_t		rs = NPI_SUCCESS;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
		"==> nxge_init_fzc_txdma_channel_pages"));

#ifndef	NIU_HV_WORKAROUND
	/* N2/NIU pages are set up via the hypervisor: nothing to do. */
	if (nxgep->niu_type == N2_NIU) {
		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
			"<== nxge_init_fzc_txdma_channel_pages: "
			"N2_NIU: no need to set txdma logical pages"));
		return (NXGE_OK);
	}
#else
	/* Workaround build: fall through and program the pages here. */
	if (nxgep->niu_type == N2_NIU) {
		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
			"<== nxge_init_fzc_txdma_channel_pages: "
			"N2_NIU: NEED to set txdma logical pages"));
	}
#endif

	/*
	 * Initialize logical page 1.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	cfg.func_num = nxgep->function_num;
	cfg.page_num = 0;
	cfg.valid = tx_ring_p->page_valid.bits.ldw.page0;
	cfg.value = tx_ring_p->page_value_1.value;
	cfg.mask = tx_ring_p->page_mask_1.value;
	cfg.reloc = tx_ring_p->page_reloc_1.value;

	rs = npi_txdma_log_page_set(handle, channel,
		(p_dma_log_page_t)&cfg);
	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	/*
	 * Initialize logical page 2 (func_num carries over from above).
	 */
	cfg.page_num = 1;
	cfg.valid = tx_ring_p->page_valid.bits.ldw.page1;
	cfg.value = tx_ring_p->page_value_2.value;
	cfg.mask = tx_ring_p->page_mask_2.value;
	cfg.reloc = tx_ring_p->page_reloc_2.value;

	rs = npi_txdma_log_page_set(handle, channel, &cfg);
	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	/* Initialize the page handle */
	rs = npi_txdma_log_page_handle_set(handle, channel,
			&tx_ring_p->page_hdl);

	if (rs == NPI_SUCCESS) {
		return (NXGE_OK);
	} else {
		return (NXGE_ERROR | rs);
	}
}
698 
699 
700 nxge_status_t
701 nxge_init_fzc_txdma_channel_drr(p_nxge_t nxgep, uint16_t channel,
702 	p_tx_ring_t tx_ring_p)
703 {
704 	npi_status_t	rs = NPI_SUCCESS;
705 	npi_handle_t	handle;
706 
707 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
708 	rs = npi_txc_dma_max_burst_set(handle, channel,
709 			tx_ring_p->max_burst.value);
710 	if (rs == NPI_SUCCESS) {
711 		return (NXGE_OK);
712 	} else {
713 		return (NXGE_ERROR | rs);
714 	}
715 }
716 
717 nxge_status_t
718 nxge_fzc_sys_err_mask_set(p_nxge_t nxgep, uint64_t mask)
719 {
720 	npi_status_t	rs = NPI_SUCCESS;
721 	npi_handle_t	handle;
722 
723 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
724 	rs = npi_fzc_sys_err_mask_set(handle, mask);
725 	if (rs == NPI_SUCCESS) {
726 		return (NXGE_OK);
727 	} else {
728 		return (NXGE_ERROR | rs);
729 	}
730 }
731 
732 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
/*
 * Configure one transmit DMA channel's logical pages through the
 * sun4v hypervisor: page 0 covers the data buffers, page 1 the
 * control buffers. Idempotent: once hv_set is recorded the mappings
 * are assumed valid and the function returns immediately.
 * Returns NXGE_OK, or NXGE_ERROR ORed with the errno mapped from the
 * hypervisor error code.
 */
nxge_status_t
nxge_init_hv_fzc_txdma_channel_pages(p_nxge_t nxgep, uint16_t channel,
	p_tx_ring_t tx_ring_p)
{
	int			err;
	uint64_t		hverr;
#ifdef	DEBUG
	uint64_t		ra, size;
#endif

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
		"==> nxge_init_hv_fzc_txdma_channel_pages"));

	/* Already set up for this ring: nothing to do. */
	if (tx_ring_p->hv_set) {
		return (NXGE_OK);
	}

	/*
	 * Initialize logical page 1 for data buffers.
	 */
	hverr = hv_niu_tx_logical_page_conf((uint64_t)channel,
			(uint64_t)0,
			tx_ring_p->hv_tx_buf_base_ioaddr_pp,
			tx_ring_p->hv_tx_buf_ioaddr_size);

	err = (nxge_status_t)nxge_herr2kerr(hverr);
	if (err != 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"<== nxge_init_hv_fzc_txdma_channel_pages: channel %d "
			"error status 0x%x "
			"(page 0 data buf) hverr 0x%llx "
			"ioaddr_pp $%p "
			"size 0x%llx ",
			channel,
			err,
			hverr,
			tx_ring_p->hv_tx_buf_base_ioaddr_pp,
			tx_ring_p->hv_tx_buf_ioaddr_size));
		return (NXGE_ERROR | err);
	}

#ifdef	DEBUG
	/* Read the mapping back from the HV to verify what was set. */
	ra = size = 0;
	hverr = hv_niu_tx_logical_page_info((uint64_t)channel,
			(uint64_t)0,
			&ra,
			&size);

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
		"==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
		"ok status 0x%x "
		"(page 0 data buf) hverr 0x%llx "
		"set ioaddr_pp $%p "
		"set size 0x%llx "
		"get ra ioaddr_pp $%p "
		"get size 0x%llx ",
		channel,
		err,
		hverr,
		tx_ring_p->hv_tx_buf_base_ioaddr_pp,
		tx_ring_p->hv_tx_buf_ioaddr_size,
		ra,
		size));
#endif

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
		"==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
		"(page 0 data buf) hverr 0x%llx "
		"ioaddr_pp $%p "
		"size 0x%llx ",
		channel,
		hverr,
		tx_ring_p->hv_tx_buf_base_ioaddr_pp,
		tx_ring_p->hv_tx_buf_ioaddr_size));

	/*
	 * Initialize logical page 2 for control buffers.
	 */
	hverr = hv_niu_tx_logical_page_conf((uint64_t)channel,
			(uint64_t)1,
			tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
			tx_ring_p->hv_tx_cntl_ioaddr_size);

	err = (nxge_status_t)nxge_herr2kerr(hverr);

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
		"==> nxge_init_hv_fzc_txdma_channel_pages: channel %d"
		"ok status 0x%x "
		"(page 1 cntl buf) hverr 0x%llx "
		"ioaddr_pp $%p "
		"size 0x%llx ",
		channel,
		err,
		hverr,
		tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
		tx_ring_p->hv_tx_cntl_ioaddr_size));

	if (err != 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"<== nxge_init_hv_fzc_txdma_channel_pages: channel %d"
			"error status 0x%x "
			"(page 1 cntl buf) hverr 0x%llx "
			"ioaddr_pp $%p "
			"size 0x%llx ",
			channel,
			err,
			hverr,
			tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
			tx_ring_p->hv_tx_cntl_ioaddr_size));
		return (NXGE_ERROR | err);
	}

#ifdef	DEBUG
	/* Read the mapping back from the HV to verify what was set. */
	ra = size = 0;
	hverr = hv_niu_tx_logical_page_info((uint64_t)channel,
			(uint64_t)1,
			&ra,
			&size);

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
		"==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
		"(page 1 cntl buf) hverr 0x%llx "
		"set ioaddr_pp $%p "
		"set size 0x%llx "
		"get ra ioaddr_pp $%p "
		"get size 0x%llx ",
		channel,
		hverr,
		tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
		tx_ring_p->hv_tx_cntl_ioaddr_size,
		ra,
		size));
#endif

	/* Remember that the HV mappings are in place for this ring. */
	tx_ring_p->hv_set = B_TRUE;

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
		"<== nxge_init_hv_fzc_txdma_channel_pages"));

	return (NXGE_OK);
}
874 
875 /*ARGSUSED*/
876 nxge_status_t
877 nxge_init_hv_fzc_rxdma_channel_pages(p_nxge_t nxgep,
878 		uint16_t channel, p_rx_rbr_ring_t rbrp)
879 {
880 	int			err;
881 	uint64_t		hverr;
882 #ifdef	DEBUG
883 	uint64_t		ra, size;
884 #endif
885 
886 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
887 		"==> nxge_init_hv_fzc_rxdma_channel_pages"));
888 
889 	if (rbrp->hv_set) {
890 		return (NXGE_OK);
891 	}
892 
893 	/* Initialize data buffers for page 0 */
894 	hverr = hv_niu_rx_logical_page_conf((uint64_t)channel,
895 			(uint64_t)0,
896 			rbrp->hv_rx_buf_base_ioaddr_pp,
897 			rbrp->hv_rx_buf_ioaddr_size);
898 	err = (nxge_status_t)nxge_herr2kerr(hverr);
899 	if (err != 0) {
900 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
901 			"<== nxge_init_hv_fzc_rxdma_channel_pages: channel %d"
902 			"error status 0x%x "
903 			"(page 0 data buf) hverr 0x%llx "
904 			"ioaddr_pp $%p "
905 			"size 0x%llx ",
906 			channel,
907 			err,
908 			hverr,
909 			rbrp->hv_rx_buf_base_ioaddr_pp,
910 			rbrp->hv_rx_buf_ioaddr_size));
911 
912 		return (NXGE_ERROR | err);
913 	}
914 
915 #ifdef	DEBUG
916 	ra = size = 0;
917 	(void) hv_niu_rx_logical_page_info((uint64_t)channel,
918 			(uint64_t)0,
919 			&ra,
920 			&size);
921 
922 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
923 		"==> nxge_init_hv_fzc_rxdma_channel_pages: channel %d "
924 		"ok status 0x%x "
925 		"(page 0 data buf) hverr 0x%llx "
926 		"set databuf ioaddr_pp $%p "
927 		"set databuf size 0x%llx "
928 		"get databuf ra ioaddr_pp %p "
929 		"get databuf size 0x%llx",
930 		channel,
931 		err,
932 		hverr,
933 		rbrp->hv_rx_buf_base_ioaddr_pp,
934 		rbrp->hv_rx_buf_ioaddr_size,
935 		ra,
936 		size));
937 #endif
938 
939 	/* Initialize control buffers for logical page 1.  */
940 	hverr = hv_niu_rx_logical_page_conf((uint64_t)channel,
941 			(uint64_t)1,
942 			rbrp->hv_rx_cntl_base_ioaddr_pp,
943 			rbrp->hv_rx_cntl_ioaddr_size);
944 
945 	err = (nxge_status_t)nxge_herr2kerr(hverr);
946 	if (err != 0) {
947 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
948 			"<== nxge_init_hv_fzc_rxdma_channel_pages: channel %d"
949 			"error status 0x%x "
950 			"(page 1 cntl buf) hverr 0x%llx "
951 			"ioaddr_pp $%p "
952 			"size 0x%llx ",
953 			channel,
954 			err,
955 			hverr,
956 			rbrp->hv_rx_buf_base_ioaddr_pp,
957 			rbrp->hv_rx_buf_ioaddr_size));
958 
959 		return (NXGE_ERROR | err);
960 	}
961 
962 #ifdef	DEBUG
963 	ra = size = 0;
964 	(void) hv_niu_rx_logical_page_info((uint64_t)channel,
965 			(uint64_t)1,
966 			&ra,
967 			&size);
968 
969 
970 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
971 		"==> nxge_init_hv_fzc_rxdma_channel_pages: channel %d "
972 		"error status 0x%x "
973 		"(page 1 cntl buf) hverr 0x%llx "
974 		"set cntl ioaddr_pp $%p "
975 		"set cntl size 0x%llx "
976 		"get cntl ioaddr_pp $%p "
977 		"get cntl size 0x%llx ",
978 		channel,
979 		err,
980 		hverr,
981 		rbrp->hv_rx_cntl_base_ioaddr_pp,
982 		rbrp->hv_rx_cntl_ioaddr_size,
983 		ra,
984 		size));
985 #endif
986 
987 	rbrp->hv_set = B_FALSE;
988 
989 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
990 		"<== nxge_init_hv_fzc_rxdma_channel_pages"));
991 
992 	return (NXGE_OK);
993 }
994 
995 /*
996  * Map hypervisor error code to errno. Only
997  * H_ENORADDR, H_EBADALIGN and H_EINVAL are meaningful
998  * for niu driver. Any other error codes are mapped to EINVAL.
999  */
1000 static int
1001 nxge_herr2kerr(uint64_t hv_errcode)
1002 {
1003 	int	s_errcode;
1004 
1005 	switch (hv_errcode) {
1006 	case H_ENORADDR:
1007 	case H_EBADALIGN:
1008 		s_errcode = EFAULT;
1009 		break;
1010 	case H_EOK:
1011 		s_errcode = 0;
1012 		break;
1013 	default:
1014 		s_errcode = EINVAL;
1015 		break;
1016 	}
1017 	return (s_errcode);
1018 }
1019 
1020 #endif	/* sun4v and NIU_LP_WORKAROUND */
1021