xref: /titanic_44/usr/src/uts/common/io/nxge/nxge_fzc.c (revision 0398691684c2596072212e4ca9d7033ad7ccfa54)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include	<nxge_impl.h>
29 #include	<npi_mac.h>
30 #include	<npi_rxdma.h>
31 
32 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
33 static int	nxge_herr2kerr(uint64_t);
34 #endif
35 
36 /*
37  * The following interfaces are controlled by the
38  * function control registers. Some global registers
 * are to be initialized by only one of the 2/4 functions.
40  * Use the test and set register.
41  */
42 /*ARGSUSED*/
43 nxge_status_t
44 nxge_test_and_set(p_nxge_t nxgep, uint8_t tas)
45 {
46 	npi_handle_t		handle;
47 	npi_status_t		rs = NPI_SUCCESS;
48 
49 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
50 	if ((rs = npi_dev_func_sr_sr_get_set_clear(handle, tas))
51 			!= NPI_SUCCESS) {
52 		return (NXGE_ERROR | rs);
53 	}
54 
55 	return (NXGE_OK);
56 }
57 
58 nxge_status_t
59 nxge_set_fzc_multi_part_ctl(p_nxge_t nxgep, boolean_t mpc)
60 {
61 	npi_handle_t		handle;
62 	npi_status_t		rs = NPI_SUCCESS;
63 
64 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_set_fzc_multi_part_ctl"));
65 
66 	/*
67 	 * In multi-partitioning, the partition manager
68 	 * who owns function zero should set this multi-partition
69 	 * control bit.
70 	 */
71 	if (nxgep->use_partition && nxgep->function_num) {
72 		return (NXGE_ERROR);
73 	}
74 
75 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
76 	if ((rs = npi_fzc_mpc_set(handle, mpc)) != NPI_SUCCESS) {
77 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
78 			"<== nxge_set_fzc_multi_part_ctl"));
79 		return (NXGE_ERROR | rs);
80 	}
81 
82 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_set_fzc_multi_part_ctl"));
83 
84 	return (NXGE_OK);
85 }
86 
87 nxge_status_t
88 nxge_get_fzc_multi_part_ctl(p_nxge_t nxgep, boolean_t *mpc_p)
89 {
90 	npi_handle_t		handle;
91 	npi_status_t		rs = NPI_SUCCESS;
92 
93 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_get_fzc_multi_part_ctl"));
94 
95 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
96 	if ((rs = npi_fzc_mpc_get(handle, mpc_p)) != NPI_SUCCESS) {
97 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
98 			"<== nxge_set_fzc_multi_part_ctl"));
99 		return (NXGE_ERROR | rs);
100 	}
101 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_get_fzc_multi_part_ctl"));
102 
103 	return (NXGE_OK);
104 }
105 
106 /*
107  * System interrupt registers that are under function zero
108  * management.
109  */
/*
 * nxge_fzc_intr_init:  Initialize the function-zero-managed interrupt
 * registers: the interrupt timer resolution first, then (on
 * Neptune-class hardware only) the LDG-to-LDV bindings and the system
 * interrupt data.  Returns NXGE_OK or the first failing step's status.
 */
nxge_status_t
nxge_fzc_intr_init(p_nxge_t nxgep)
{
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_init"));

	/* Configure the initial timer resolution */
	if ((status = nxge_fzc_intr_tmres_set(nxgep)) != NXGE_OK) {
		/* NOTE: returns without emitting the exit trace message. */
		return (status);
	}

	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep->niu_type)) {
		/*
		 * Set up the logical device group's logical devices that
		 * the group owns.
		 */
		if ((status = nxge_fzc_intr_ldg_num_set(nxgep)) != NXGE_OK)
			goto fzc_intr_init_exit;

		/* Configure the system interrupt data */
		if ((status = nxge_fzc_intr_sid_set(nxgep)) != NXGE_OK)
			goto fzc_intr_init_exit;
	}

fzc_intr_init_exit:

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_init"));

	return (status);
}
141 
/*
 * nxge_fzc_intr_ldg_num_set:  For each logical device group (LDG)
 * owned by this instance, bind every logical device (LDV) in the
 * group to its assigned group number in hardware.
 *
 * NOTE(review): ldvp is advanced across both loops without being
 * reset, so the LDV array is assumed to be laid out contiguously,
 * group by group, matching each group's nldvs count -- confirm
 * against the code that builds nxgep->ldgvp.
 */
nxge_status_t
nxge_fzc_intr_ldg_num_set(p_nxge_t nxgep)
{
	p_nxge_ldg_t	ldgp;
	p_nxge_ldv_t	ldvp;
	npi_handle_t	handle;
	int		i, j;
	npi_status_t	rs = NPI_SUCCESS;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_ldg_num_set"));

	/* Nothing to program until the LDG vector has been built. */
	if (nxgep->ldgvp == NULL) {
		return (NXGE_ERROR);
	}

	ldgp = nxgep->ldgvp->ldgp;
	ldvp = nxgep->ldgvp->ldvp;
	if (ldgp == NULL || ldvp == NULL) {
		return (NXGE_ERROR);
	}

	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	for (i = 0; i < nxgep->ldgvp->ldg_intrs; i++, ldgp++) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
			"==> nxge_fzc_intr_ldg_num_set "
			"<== nxge_f(Neptune): # ldv %d "
			"in group %d", ldgp->nldvs, ldgp->ldg));

		/* Bind each logical device in this group; abort on failure. */
		for (j = 0; j < ldgp->nldvs; j++, ldvp++) {
			rs = npi_fzc_ldg_num_set(handle, ldvp->ldv,
				ldvp->ldg_assigned);
			if (rs != NPI_SUCCESS) {
				NXGE_DEBUG_MSG((nxgep, INT_CTL,
					"<== nxge_fzc_intr_ldg_num_set failed "
					" rs 0x%x ldv %d ldg %d",
					rs, ldvp->ldv, ldvp->ldg_assigned));
				return (NXGE_ERROR | rs);
			}
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
				"<== nxge_fzc_intr_ldg_num_set OK "
				" ldv %d ldg %d",
				ldvp->ldv, ldvp->ldg_assigned));
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_ldg_num_set"));

	return (NXGE_OK);
}
192 
193 nxge_status_t
194 nxge_fzc_intr_tmres_set(p_nxge_t nxgep)
195 {
196 	npi_handle_t	handle;
197 	npi_status_t	rs = NPI_SUCCESS;
198 
199 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_tmrese_set"));
200 	if (nxgep->ldgvp == NULL) {
201 		return (NXGE_ERROR);
202 	}
203 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
204 	if ((rs = npi_fzc_ldg_timer_res_set(handle, nxgep->ldgvp->tmres))) {
205 		return (NXGE_ERROR | rs);
206 	}
207 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_tmrese_set"));
208 
209 	return (NXGE_OK);
210 }
211 
212 nxge_status_t
213 nxge_fzc_intr_sid_set(p_nxge_t nxgep)
214 {
215 	npi_handle_t	handle;
216 	p_nxge_ldg_t	ldgp;
217 	fzc_sid_t	sid;
218 	int		i;
219 	npi_status_t	rs = NPI_SUCCESS;
220 
221 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_sid_set"));
222 	if (nxgep->ldgvp == NULL) {
223 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
224 			"<== nxge_fzc_intr_sid_set: no ldg"));
225 		return (NXGE_ERROR);
226 	}
227 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
228 	ldgp = nxgep->ldgvp->ldgp;
229 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
230 		"==> nxge_fzc_intr_sid_set: #int %d", nxgep->ldgvp->ldg_intrs));
231 	for (i = 0; i < nxgep->ldgvp->ldg_intrs; i++, ldgp++) {
232 		sid.ldg = ldgp->ldg;
233 		sid.niu = B_FALSE;
234 		sid.func = ldgp->func;
235 		sid.vector = ldgp->vector;
236 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
237 			"==> nxge_fzc_intr_sid_set(%d): func %d group %d "
238 			"vector %d",
239 			i, sid.func, sid.ldg, sid.vector));
240 		rs = npi_fzc_sid_set(handle, sid);
241 		if (rs != NPI_SUCCESS) {
242 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
243 				"<== nxge_fzc_intr_sid_set:failed 0x%x",
244 				rs));
245 			return (NXGE_ERROR | rs);
246 		}
247 	}
248 
249 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_sid_set"));
250 
251 	return (NXGE_OK);
252 
253 }
254 
255 /*
256  * Receive DMA registers that are under function zero
257  * management.
258  */
/*ARGSUSED*/
/*
 * nxge_init_fzc_rxdma_channel:  Program the function-zero-managed
 * state for one RXDMA channel: the channel's logical pages, then its
 * RED (random early discard) parameters.  mbox_p is unused.
 *
 * N2/NIU (no NIU_HV_WORKAROUND): pages are configured through
 * hypervisor calls on sun4v + NIU_LP_WORKAROUND builds.
 * Neptune-class chips: pages are programmed directly.
 * Any other niu_type is rejected with NXGE_ERROR.
 */
nxge_status_t
nxge_init_fzc_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
	p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
{
	nxge_status_t	status = NXGE_OK;
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_init_fzc_rxdma_channel"));

	if (nxgep->niu_type == N2_NIU) {
#ifndef	NIU_HV_WORKAROUND
#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_init_fzc_rxdma_channel: N2_NIU - call HV "
		    "set up logical pages"));
		/* Initialize the RXDMA logical pages */
		status = nxge_init_hv_fzc_rxdma_channel_pages(nxgep, channel,
			rbr_p);
		if (status != NXGE_OK) {
			return (status);
		}
#endif
		status = NXGE_OK;
#else
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_init_fzc_rxdma_channel: N2_NIU - NEED to "
		    "set up logical pages"));
		/* Initialize the RXDMA logical pages */
		status = nxge_init_fzc_rxdma_channel_pages(nxgep, channel,
		    rbr_p);
		if (status != NXGE_OK) {
			return (status);
		}
#endif
	} else if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep->niu_type)) {
		/* Initialize the RXDMA logical pages */
		status = nxge_init_fzc_rxdma_channel_pages(nxgep,
		    channel, rbr_p);
		if (status != NXGE_OK) {
			return (status);
		}
	} else {
		return (NXGE_ERROR);
	}

	/* Configure RED parameters */
	status = nxge_init_fzc_rxdma_channel_red(nxgep, channel, rcr_p);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_init_fzc_rxdma_channel"));
	return (status);
}
309 
310 /*ARGSUSED*/
311 nxge_status_t
312 nxge_init_fzc_rxdma_channel_pages(p_nxge_t nxgep,
313 		uint16_t channel, p_rx_rbr_ring_t rbrp)
314 {
315 	npi_handle_t		handle;
316 	dma_log_page_t		cfg;
317 	npi_status_t		rs = NPI_SUCCESS;
318 
319 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
320 		"==> nxge_init_fzc_rxdma_channel_pages"));
321 
322 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
323 	/*
324 	 * Initialize logical page 1.
325 	 */
326 	cfg.func_num = nxgep->function_num;
327 	cfg.page_num = 0;
328 	cfg.valid = rbrp->page_valid.bits.ldw.page0;
329 	cfg.value = rbrp->page_value_1.value;
330 	cfg.mask = rbrp->page_mask_1.value;
331 	cfg.reloc = rbrp->page_reloc_1.value;
332 	rs = npi_rxdma_cfg_logical_page(handle, channel,
333 			(p_dma_log_page_t)&cfg);
334 	if (rs != NPI_SUCCESS) {
335 		return (NXGE_ERROR | rs);
336 	}
337 
338 	/*
339 	 * Initialize logical page 2.
340 	 */
341 	cfg.page_num = 1;
342 	cfg.valid = rbrp->page_valid.bits.ldw.page1;
343 	cfg.value = rbrp->page_value_2.value;
344 	cfg.mask = rbrp->page_mask_2.value;
345 	cfg.reloc = rbrp->page_reloc_2.value;
346 
347 	rs = npi_rxdma_cfg_logical_page(handle, channel, &cfg);
348 	if (rs != NPI_SUCCESS) {
349 		return (NXGE_ERROR | rs);
350 	}
351 
352 	/* Initialize the page handle */
353 	rs = npi_rxdma_cfg_logical_page_handle(handle, channel,
354 			rbrp->page_hdl.bits.ldw.handle);
355 
356 	if (rs != NPI_SUCCESS) {
357 		return (NXGE_ERROR | rs);
358 	}
359 
360 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
361 		"<== nxge_init_fzc_rxdma_channel_pages"));
362 
363 	return (NXGE_OK);
364 }
365 
366 /*ARGSUSED*/
367 nxge_status_t
368 nxge_init_fzc_rxdma_channel_red(p_nxge_t nxgep,
369 	uint16_t channel, p_rx_rcr_ring_t rcr_p)
370 {
371 	npi_handle_t		handle;
372 	rdc_red_para_t		red;
373 	npi_status_t		rs = NPI_SUCCESS;
374 
375 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rxdma_channel_red"));
376 
377 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
378 	red.value = 0;
379 	red.bits.ldw.win = RXDMA_RED_WINDOW_DEFAULT;
380 	red.bits.ldw.thre = (rcr_p->comp_size - RXDMA_RED_LESS_ENTRIES);
381 	red.bits.ldw.win_syn = RXDMA_RED_WINDOW_DEFAULT;
382 	red.bits.ldw.thre_sync = (rcr_p->comp_size - RXDMA_RED_LESS_ENTRIES);
383 
384 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
385 		"==> nxge_init_fzc_rxdma_channel_red(thre_sync %d(%x))",
386 		red.bits.ldw.thre_sync,
387 		red.bits.ldw.thre_sync));
388 
389 	rs = npi_rxdma_cfg_wred_param(handle, channel, &red);
390 	if (rs != NPI_SUCCESS) {
391 		return (NXGE_ERROR | rs);
392 	}
393 
394 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
395 		"<== nxge_init_fzc_rxdma_channel_red"));
396 
397 	return (NXGE_OK);
398 }
399 
/*ARGSUSED*/
/*
 * nxge_init_fzc_txdma_channel:  Program the function-zero-managed
 * state for one TXDMA channel: its logical pages, then the transmit
 * DRR weight (the TXC max burst register).  mbox_p is unused.
 *
 * N2/NIU (no NIU_HV_WORKAROUND): pages are configured through
 * hypervisor calls on sun4v + NIU_LP_WORKAROUND builds.
 * Neptune-class chips: pages are programmed directly.
 * Any other niu_type is rejected with NXGE_ERROR.
 */
nxge_status_t
nxge_init_fzc_txdma_channel(p_nxge_t nxgep, uint16_t channel,
	p_tx_ring_t tx_ring_p, p_tx_mbox_t mbox_p)
{
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
		"==> nxge_init_fzc_txdma_channel"));

	if (nxgep->niu_type == N2_NIU) {
#ifndef	NIU_HV_WORKAROUND
#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
		    "==> nxge_init_fzc_txdma_channel "
		    "N2_NIU: call HV to set up txdma logical pages"));
		status = nxge_init_hv_fzc_txdma_channel_pages(nxgep, channel,
		    tx_ring_p);
		if (status != NXGE_OK) {
			return (status);
		}
#endif
		status = NXGE_OK;
#else
		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
		    "==> nxge_init_fzc_txdma_channel "
		    "N2_NIU: NEED to set up txdma logical pages"));
		/* Initialize the TXDMA logical pages */
		(void) nxge_init_fzc_txdma_channel_pages(nxgep, channel,
		    tx_ring_p);
#endif
	} else if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep->niu_type)) {
		/* Initialize the TXDMA logical pages */
		(void) nxge_init_fzc_txdma_channel_pages(nxgep,
		    channel, tx_ring_p);
	} else {
		return (NXGE_ERROR);
	}

	/*
	 * Configure Transmit DRR Weight parameters
	 * (It actually programs the TXC max burst register).
	 */
	(void) nxge_init_fzc_txdma_channel_drr(nxgep, channel, tx_ring_p);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
		"<== nxge_init_fzc_txdma_channel"));
	return (status);
}
449 
450 nxge_status_t
451 nxge_init_fzc_common(p_nxge_t nxgep)
452 {
453 	nxge_status_t	status = NXGE_OK;
454 
455 	(void) nxge_init_fzc_rx_common(nxgep);
456 
457 	return (status);
458 }
459 
/*
 * nxge_init_fzc_rx_common:  One-time function-zero RXDMA setup shared
 * by all channels: the RXDMA clock divider, 32-bit addressing mode on
 * i386 kernels, the WRED random seed, and the per-group RDC tables.
 */
nxge_status_t
nxge_init_fzc_rx_common(p_nxge_t nxgep)
{
	npi_handle_t	handle;
	npi_status_t	rs = NPI_SUCCESS;
	nxge_status_t	status = NXGE_OK;
	clock_t		lbolt;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rx_common"));
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/* Guard against an unmapped register handle. */
	if (!handle.regp) {
		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
			"==> nxge_init_fzc_rx_common null ptr"));
		return (NXGE_ERROR);
	}

	/*
	 * Configure the rxdma clock divider
	 * This is the granularity counter based on
	 * the hardware system clock (i.e. 300 Mhz) and
	 * it is running around 3 nanoseconds.
	 * So, set the clock divider counter to 1000 to get
	 * microsecond granularity.
	 * For example, for a 3 microsecond timeout, the timeout
	 * will be set to 1.
	 */
	rs = npi_rxdma_cfg_clock_div_set(handle, RXDMA_CK_DIV_DEFAULT);
	if (rs != NPI_SUCCESS)
		return (NXGE_ERROR | rs);

#if defined(__i386)
	/* 32-bit kernels: run both RXDMA and TXDMA in 32-bit mode. */
	rs = npi_rxdma_cfg_32bitmode_enable(handle);
	if (rs != NPI_SUCCESS)
		return (NXGE_ERROR | rs);
	rs = npi_txdma_mode32_set(handle, B_TRUE);
	if (rs != NPI_SUCCESS)
		return (NXGE_ERROR | rs);
#endif

	/*
	 * Enable WRED and program an initial value.
	 * Use time to set the initial random number.
	 */
	(void) drv_getparm(LBOLT, &lbolt);
	rs = npi_rxdma_cfg_red_rand_init(handle, (uint16_t)lbolt);
	if (rs != NPI_SUCCESS)
		return (NXGE_ERROR | rs);

	/* Initialize the RDC tables for each group */
	status = nxge_init_fzc_rdc_tbl(nxgep);


	/* Ethernet Timeout Counter (?) */

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
		"<== nxge_init_fzc_rx_common:status 0x%08x", status));

	return (status);
}
519 
520 nxge_status_t
521 nxge_init_fzc_rdc_tbl(p_nxge_t nxgep)
522 {
523 	npi_handle_t		handle;
524 	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
525 	p_nxge_hw_pt_cfg_t	p_cfgp;
526 	p_nxge_rdc_grp_t	rdc_grp_p;
527 	uint8_t 		grp_tbl_id;
528 	int			ngrps;
529 	int			i;
530 	npi_status_t		rs = NPI_SUCCESS;
531 	nxge_status_t		status = NXGE_OK;
532 
533 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rdc_tbl"));
534 
535 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
536 	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
537 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
538 
539 	grp_tbl_id = p_cfgp->start_rdc_grpid;
540 	rdc_grp_p = &p_dma_cfgp->rdc_grps[0];
541 	ngrps = p_cfgp->max_rdc_grpids;
542 	for (i = 0; i < ngrps; i++, rdc_grp_p++) {
543 		rs = npi_rxdma_cfg_rdc_table(handle, grp_tbl_id++,
544 			rdc_grp_p->rdc);
545 		if (rs != NPI_SUCCESS) {
546 			status = NXGE_ERROR | rs;
547 			break;
548 		}
549 	}
550 
551 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_init_fzc_rdc_tbl"));
552 	return (status);
553 }
554 
/*
 * nxge_init_fzc_rxdma_port:  Per-port function-zero receive setup:
 * the port scheduler DRR weight (Neptune 1G copper/fiber ports only),
 * the port's default RDC, and the MAC host info table.  Each host
 * info entry defaults to the partition's starting RDC group and MAC
 * preference, overridden per-slot by the class config when that
 * slot's flag is set.
 */
nxge_status_t
nxge_init_fzc_rxdma_port(p_nxge_t nxgep)
{
	npi_handle_t		handle;
	p_nxge_dma_pt_cfg_t	p_all_cfgp;
	p_nxge_hw_pt_cfg_t	p_cfgp;
	hostinfo_t 		hostinfo;
	int			i;
	npi_status_t		rs = NPI_SUCCESS;
	p_nxge_class_pt_cfg_t 	p_class_cfgp;
	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rxdma_port"));

	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/*
	 * Initialize the port scheduler DRR weight.
	 * npi_rxdma_cfg_port_ddr_weight();
	 */

	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep->niu_type)) {
		if ((nxgep->mac.portmode == PORT_1G_COPPER) ||
		    (nxgep->mac.portmode == PORT_1G_FIBER)) {
			rs = npi_rxdma_cfg_port_ddr_weight(handle,
			    nxgep->function_num,
			    NXGE_RX_DRR_WT_1G);
			if (rs != NPI_SUCCESS) {
				return (NXGE_ERROR | rs);
			}
		}
	}

	/* Program the default RDC of a port */
	rs = npi_rxdma_cfg_default_port_rdc(handle, nxgep->function_num,
			p_cfgp->def_rdc);
	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	/*
	 * Configure the MAC host info table with RDC tables
	 */
	hostinfo.value = 0;
	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
	for (i = 0; i < p_cfgp->max_macs; i++) {
		/* Partition defaults, unless this slot has an override. */
		hostinfo.bits.w0.rdc_tbl_num = p_cfgp->start_rdc_grpid;
		hostinfo.bits.w0.mac_pref = p_cfgp->mac_pref;
		if (p_class_cfgp->mac_host_info[i].flag) {
			hostinfo.bits.w0.rdc_tbl_num =
				p_class_cfgp->mac_host_info[i].rdctbl;
			hostinfo.bits.w0.mac_pref =
				p_class_cfgp->mac_host_info[i].mpr_npr;
		}

		rs = npi_mac_hostinfo_entry(handle, OP_SET,
				nxgep->function_num, i, &hostinfo);
		if (rs != NPI_SUCCESS)
			return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
		"<== nxge_init_fzc_rxdma_port rs 0x%08x", rs));

	return (NXGE_OK);

}
621 
622 nxge_status_t
623 nxge_fzc_dmc_def_port_rdc(p_nxge_t nxgep, uint8_t port, uint16_t rdc)
624 {
625 	npi_status_t rs = NPI_SUCCESS;
626 	rs = npi_rxdma_cfg_default_port_rdc(nxgep->npi_reg_handle,
627 				    port, rdc);
628 	if (rs & NPI_FAILURE)
629 		return (NXGE_ERROR | rs);
630 	return (NXGE_OK);
631 }
632 
/*
 * nxge_init_fzc_txdma_channel_pages:  Directly program a TXDMA
 * channel's two logical pages and its page handle from the transmit
 * ring state.  On N2/NIU (without NIU_HV_WORKAROUND) this is a no-op
 * because the logical pages are owned by the hypervisor.
 */
nxge_status_t
nxge_init_fzc_txdma_channel_pages(p_nxge_t nxgep, uint16_t channel,
	p_tx_ring_t tx_ring_p)
{
	npi_handle_t		handle;
	dma_log_page_t		cfg;
	npi_status_t		rs = NPI_SUCCESS;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
		"==> nxge_init_fzc_txdma_channel_pages"));

#ifndef	NIU_HV_WORKAROUND
	if (nxgep->niu_type == N2_NIU) {
		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
			"<== nxge_init_fzc_txdma_channel_pages: "
			"N2_NIU: no need to set txdma logical pages"));
		return (NXGE_OK);
	}
#else
	if (nxgep->niu_type == N2_NIU) {
		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
			"<== nxge_init_fzc_txdma_channel_pages: "
			"N2_NIU: NEED to set txdma logical pages"));
	}
#endif

	/*
	 * Initialize logical page 1.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	cfg.func_num = nxgep->function_num;
	cfg.page_num = 0;
	cfg.valid = tx_ring_p->page_valid.bits.ldw.page0;
	cfg.value = tx_ring_p->page_value_1.value;
	cfg.mask = tx_ring_p->page_mask_1.value;
	cfg.reloc = tx_ring_p->page_reloc_1.value;

	rs = npi_txdma_log_page_set(handle, channel,
		(p_dma_log_page_t)&cfg);
	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	/*
	 * Initialize logical page 2.
	 */
	cfg.page_num = 1;
	cfg.valid = tx_ring_p->page_valid.bits.ldw.page1;
	cfg.value = tx_ring_p->page_value_2.value;
	cfg.mask = tx_ring_p->page_mask_2.value;
	cfg.reloc = tx_ring_p->page_reloc_2.value;

	rs = npi_txdma_log_page_set(handle, channel, &cfg);
	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	/* Initialize the page handle */
	rs = npi_txdma_log_page_handle_set(handle, channel,
			&tx_ring_p->page_hdl);

	if (rs == NPI_SUCCESS) {
		return (NXGE_OK);
	} else {
		return (NXGE_ERROR | rs);
	}
}
700 
701 
702 nxge_status_t
703 nxge_init_fzc_txdma_channel_drr(p_nxge_t nxgep, uint16_t channel,
704 	p_tx_ring_t tx_ring_p)
705 {
706 	npi_status_t	rs = NPI_SUCCESS;
707 	npi_handle_t	handle;
708 
709 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
710 	rs = npi_txc_dma_max_burst_set(handle, channel,
711 			tx_ring_p->max_burst.value);
712 	if (rs == NPI_SUCCESS) {
713 		return (NXGE_OK);
714 	} else {
715 		return (NXGE_ERROR | rs);
716 	}
717 }
718 
719 nxge_status_t
720 nxge_fzc_sys_err_mask_set(p_nxge_t nxgep, uint64_t mask)
721 {
722 	npi_status_t	rs = NPI_SUCCESS;
723 	npi_handle_t	handle;
724 
725 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
726 	rs = npi_fzc_sys_err_mask_set(handle, mask);
727 	if (rs == NPI_SUCCESS) {
728 		return (NXGE_OK);
729 	} else {
730 		return (NXGE_ERROR | rs);
731 	}
732 }
733 
734 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
/*
 * nxge_init_hv_fzc_txdma_channel_pages:  Configure a TXDMA channel's
 * two logical pages (page 0: data buffers, page 1: control buffers)
 * through hypervisor calls, then latch hv_set so subsequent calls
 * become no-ops.  Hypervisor errors are mapped to errno values by
 * nxge_herr2kerr() and folded into the returned status.
 */
nxge_status_t
nxge_init_hv_fzc_txdma_channel_pages(p_nxge_t nxgep, uint16_t channel,
	p_tx_ring_t tx_ring_p)
{
	int			err;
	uint64_t		hverr;
#ifdef	DEBUG
	uint64_t		ra, size;
#endif

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
		"==> nxge_init_hv_fzc_txdma_channel_pages"));

	/* Pages already configured by a previous call; nothing to do. */
	if (tx_ring_p->hv_set) {
		return (NXGE_OK);
	}

	/*
	 * Initialize logical page 1 for data buffers.
	 */
	hverr = hv_niu_tx_logical_page_conf((uint64_t)channel,
			(uint64_t)0,
			tx_ring_p->hv_tx_buf_base_ioaddr_pp,
			tx_ring_p->hv_tx_buf_ioaddr_size);

	err = (nxge_status_t)nxge_herr2kerr(hverr);
	if (err != 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"<== nxge_init_hv_fzc_txdma_channel_pages: channel %d "
			"error status 0x%x "
			"(page 0 data buf) hverr 0x%llx "
			"ioaddr_pp $%p "
			"size 0x%llx ",
			channel,
			err,
			hverr,
			tx_ring_p->hv_tx_buf_base_ioaddr_pp,
			tx_ring_p->hv_tx_buf_ioaddr_size));
		return (NXGE_ERROR | err);
	}

#ifdef	DEBUG
	/* Read back what the hypervisor recorded for page 0. */
	ra = size = 0;
	hverr = hv_niu_tx_logical_page_info((uint64_t)channel,
			(uint64_t)0,
			&ra,
			&size);

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
		"==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
		"ok status 0x%x "
		"(page 0 data buf) hverr 0x%llx "
		"set ioaddr_pp $%p "
		"set size 0x%llx "
		"get ra ioaddr_pp $%p "
		"get size 0x%llx ",
		channel,
		err,
		hverr,
		tx_ring_p->hv_tx_buf_base_ioaddr_pp,
		tx_ring_p->hv_tx_buf_ioaddr_size,
		ra,
		size));
#endif

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
		"==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
		"(page 0 data buf) hverr 0x%llx "
		"ioaddr_pp $%p "
		"size 0x%llx ",
		channel,
		hverr,
		tx_ring_p->hv_tx_buf_base_ioaddr_pp,
		tx_ring_p->hv_tx_buf_ioaddr_size));

	/*
	 * Initialize logical page 2 for control buffers.
	 */
	hverr = hv_niu_tx_logical_page_conf((uint64_t)channel,
			(uint64_t)1,
			tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
			tx_ring_p->hv_tx_cntl_ioaddr_size);

	err = (nxge_status_t)nxge_herr2kerr(hverr);

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
		"==> nxge_init_hv_fzc_txdma_channel_pages: channel %d"
		"ok status 0x%x "
		"(page 1 cntl buf) hverr 0x%llx "
		"ioaddr_pp $%p "
		"size 0x%llx ",
		channel,
		err,
		hverr,
		tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
		tx_ring_p->hv_tx_cntl_ioaddr_size));

	if (err != 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"<== nxge_init_hv_fzc_txdma_channel_pages: channel %d"
			"error status 0x%x "
			"(page 1 cntl buf) hverr 0x%llx "
			"ioaddr_pp $%p "
			"size 0x%llx ",
			channel,
			err,
			hverr,
			tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
			tx_ring_p->hv_tx_cntl_ioaddr_size));
		return (NXGE_ERROR | err);
	}

#ifdef	DEBUG
	/* Read back what the hypervisor recorded for page 1. */
	ra = size = 0;
	hverr = hv_niu_tx_logical_page_info((uint64_t)channel,
			(uint64_t)1,
			&ra,
			&size);

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
		"==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
		"(page 1 cntl buf) hverr 0x%llx "
		"set ioaddr_pp $%p "
		"set size 0x%llx "
		"get ra ioaddr_pp $%p "
		"get size 0x%llx ",
		channel,
		hverr,
		tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
		tx_ring_p->hv_tx_cntl_ioaddr_size,
		ra,
		size));
#endif

	/* Latch success so a repeat call does not reprogram the pages. */
	tx_ring_p->hv_set = B_TRUE;

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
		"<== nxge_init_hv_fzc_txdma_channel_pages"));

	return (NXGE_OK);
}
876 
877 /*ARGSUSED*/
878 nxge_status_t
879 nxge_init_hv_fzc_rxdma_channel_pages(p_nxge_t nxgep,
880 		uint16_t channel, p_rx_rbr_ring_t rbrp)
881 {
882 	int			err;
883 	uint64_t		hverr;
884 #ifdef	DEBUG
885 	uint64_t		ra, size;
886 #endif
887 
888 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
889 		"==> nxge_init_hv_fzc_rxdma_channel_pages"));
890 
891 	if (rbrp->hv_set) {
892 		return (NXGE_OK);
893 	}
894 
895 	/* Initialize data buffers for page 0 */
896 	hverr = hv_niu_rx_logical_page_conf((uint64_t)channel,
897 			(uint64_t)0,
898 			rbrp->hv_rx_buf_base_ioaddr_pp,
899 			rbrp->hv_rx_buf_ioaddr_size);
900 	err = (nxge_status_t)nxge_herr2kerr(hverr);
901 	if (err != 0) {
902 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
903 			"<== nxge_init_hv_fzc_rxdma_channel_pages: channel %d"
904 			"error status 0x%x "
905 			"(page 0 data buf) hverr 0x%llx "
906 			"ioaddr_pp $%p "
907 			"size 0x%llx ",
908 			channel,
909 			err,
910 			hverr,
911 			rbrp->hv_rx_buf_base_ioaddr_pp,
912 			rbrp->hv_rx_buf_ioaddr_size));
913 
914 		return (NXGE_ERROR | err);
915 	}
916 
917 #ifdef	DEBUG
918 	ra = size = 0;
919 	(void) hv_niu_rx_logical_page_info((uint64_t)channel,
920 			(uint64_t)0,
921 			&ra,
922 			&size);
923 
924 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
925 		"==> nxge_init_hv_fzc_rxdma_channel_pages: channel %d "
926 		"ok status 0x%x "
927 		"(page 0 data buf) hverr 0x%llx "
928 		"set databuf ioaddr_pp $%p "
929 		"set databuf size 0x%llx "
930 		"get databuf ra ioaddr_pp %p "
931 		"get databuf size 0x%llx",
932 		channel,
933 		err,
934 		hverr,
935 		rbrp->hv_rx_buf_base_ioaddr_pp,
936 		rbrp->hv_rx_buf_ioaddr_size,
937 		ra,
938 		size));
939 #endif
940 
941 	/* Initialize control buffers for logical page 1.  */
942 	hverr = hv_niu_rx_logical_page_conf((uint64_t)channel,
943 			(uint64_t)1,
944 			rbrp->hv_rx_cntl_base_ioaddr_pp,
945 			rbrp->hv_rx_cntl_ioaddr_size);
946 
947 	err = (nxge_status_t)nxge_herr2kerr(hverr);
948 	if (err != 0) {
949 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
950 			"<== nxge_init_hv_fzc_rxdma_channel_pages: channel %d"
951 			"error status 0x%x "
952 			"(page 1 cntl buf) hverr 0x%llx "
953 			"ioaddr_pp $%p "
954 			"size 0x%llx ",
955 			channel,
956 			err,
957 			hverr,
958 			rbrp->hv_rx_buf_base_ioaddr_pp,
959 			rbrp->hv_rx_buf_ioaddr_size));
960 
961 		return (NXGE_ERROR | err);
962 	}
963 
964 #ifdef	DEBUG
965 	ra = size = 0;
966 	(void) hv_niu_rx_logical_page_info((uint64_t)channel,
967 			(uint64_t)1,
968 			&ra,
969 			&size);
970 
971 
972 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
973 		"==> nxge_init_hv_fzc_rxdma_channel_pages: channel %d "
974 		"error status 0x%x "
975 		"(page 1 cntl buf) hverr 0x%llx "
976 		"set cntl ioaddr_pp $%p "
977 		"set cntl size 0x%llx "
978 		"get cntl ioaddr_pp $%p "
979 		"get cntl size 0x%llx ",
980 		channel,
981 		err,
982 		hverr,
983 		rbrp->hv_rx_cntl_base_ioaddr_pp,
984 		rbrp->hv_rx_cntl_ioaddr_size,
985 		ra,
986 		size));
987 #endif
988 
989 	rbrp->hv_set = B_FALSE;
990 
991 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
992 		"<== nxge_init_hv_fzc_rxdma_channel_pages"));
993 
994 	return (NXGE_OK);
995 }
996 
997 /*
998  * Map hypervisor error code to errno. Only
999  * H_ENORADDR, H_EBADALIGN and H_EINVAL are meaningful
1000  * for niu driver. Any other error codes are mapped to EINVAL.
1001  */
1002 static int
1003 nxge_herr2kerr(uint64_t hv_errcode)
1004 {
1005 	int	s_errcode;
1006 
1007 	switch (hv_errcode) {
1008 	case H_ENORADDR:
1009 	case H_EBADALIGN:
1010 		s_errcode = EFAULT;
1011 		break;
1012 	case H_EOK:
1013 		s_errcode = 0;
1014 		break;
1015 	default:
1016 		s_errcode = EINVAL;
1017 		break;
1018 	}
1019 	return (s_errcode);
1020 }
1021 
1022 #endif	/* sun4v and NIU_LP_WORKAROUND */
1023