xref: /titanic_52/usr/src/uts/common/io/nxge/nxge_rxdma.c (revision d8d4fa8f33f737d82c7dcd7ddd11b4342ce4fbca)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/nxge/nxge_impl.h>
29 #include <sys/nxge/nxge_rxdma.h>
30 #include <sys/nxge/nxge_hio.h>
31 
32 #if !defined(_BIG_ENDIAN)
33 #include <npi_rx_rd32.h>
34 #endif
35 #include <npi_rx_rd64.h>
36 #include <npi_rx_wr64.h>
37 
38 #define	NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp)	\
39 	(rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid)
40 #define	NXGE_ACTUAL_RDC(nxgep, rdc)	\
41 	(rdc + nxgep->pt_config.hw_config.start_rdc)
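
/*
 * Worked example (hypothetical values, not taken from this file): if
 * this port's partition starts at hardware RDC 8 (start_rdc == 8) and
 * its RDC groups start at table 4 (def_mac_rxdma_grpid == 4), then
 * logical channel 2 maps to actual RDC 10 and logical group 1 maps to
 * hardware RDC table 5.
 */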
42 
43 /*
44  * Globals: tunable parameters (/etc/system or adb)
45  *
46  */
47 extern uint32_t nxge_rbr_size;
48 extern uint32_t nxge_rcr_size;
49 extern uint32_t	nxge_rbr_spare_size;
50 
51 extern uint32_t nxge_mblks_pending;
52 
53 /*
54  * Tunable to reduce the amount of time spent in the
55  * ISR doing Rx Processing.
56  */
57 extern uint32_t nxge_max_rx_pkts;
58 boolean_t nxge_jumbo_enable;
59 
60 /*
61  * Tunables to manage the receive buffer blocks.
62  *
63  * nxge_rx_threshold_hi: copy all buffers.
64  * nxge_rx_buf_size_type: receive buffer block size type.
65  * nxge_rx_threshold_lo: copy only up to tunable block size type.
66  */
67 extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi;
68 extern nxge_rxbuf_type_t nxge_rx_buf_size_type;
69 extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo;
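
/*
 * Illustrative /etc/system settings (the values shown are hypothetical;
 * the variables themselves are defined elsewhere in the driver):
 *
 *	set nxge:nxge_rx_threshold_hi = 2
 *	set nxge:nxge_rx_buf_size_type = 1
 *	set nxge:nxge_rx_threshold_lo = 1
 */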
70 
71 extern uint32_t	nxge_cksum_offload;
72 
73 static nxge_status_t nxge_map_rxdma(p_nxge_t, int);
74 static void nxge_unmap_rxdma(p_nxge_t, int);
75 
76 static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t);
77 
78 static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int);
79 static void nxge_rxdma_hw_stop(p_nxge_t, int);
80 
81 static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t,
82     p_nxge_dma_common_t *,  p_rx_rbr_ring_t *,
83     uint32_t,
84     p_nxge_dma_common_t *, p_rx_rcr_ring_t *,
85     p_rx_mbox_t *);
86 static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t,
87     p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
88 
89 static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t,
90     uint16_t,
91     p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
92     p_rx_rcr_ring_t *, p_rx_mbox_t *);
93 static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t,
94     p_rx_rcr_ring_t, p_rx_mbox_t);
95 
96 static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t,
97     uint16_t,
98     p_nxge_dma_common_t *,
99     p_rx_rbr_ring_t *, uint32_t);
100 static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t,
101     p_rx_rbr_ring_t);
102 
103 static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t,
104     p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
105 static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t);
106 
107 static mblk_t *
108 nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int);
109 
110 static void nxge_receive_packet(p_nxge_t,
111 	p_rx_rcr_ring_t,
112 	p_rcr_entry_t,
113 	boolean_t *,
114 	mblk_t **, mblk_t **);
115 
116 nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t);
117 
118 static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t);
119 static void nxge_freeb(p_rx_msg_t);
120 static void nxge_rx_pkts_vring(p_nxge_t, uint_t, rx_dma_ctl_stat_t);
121 static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t);
122 
123 static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t,
124 				uint32_t, uint32_t);
125 
126 static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t,
127     p_rx_rbr_ring_t);
128 
129 
130 static nxge_status_t
131 nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t);
132 
133 nxge_status_t
134 nxge_rx_port_fatal_err_recover(p_nxge_t);
135 
136 static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t);
137 
138 nxge_status_t
139 nxge_init_rxdma_channels(p_nxge_t nxgep)
140 {
141 	nxge_grp_set_t *set = &nxgep->rx_set;
142 	int i, count;
143 
144 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels"));
145 
146 	if (!isLDOMguest(nxgep)) {
147 		if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) {
148 			cmn_err(CE_NOTE, "nxge_rxdma_hw_start_common failed");
149 			return (NXGE_ERROR);
150 		}
151 	}
152 
153 	/*
154 	 * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8)
155 	 * We only have 8 hardware RDC tables, but we may have
156 	 * up to 16 logical (software-defined) groups of RDCs,
157 	 * if we make use of layer 3 & 4 hardware classification.
158 	 */
159 	for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
160 		if ((1 << i) & set->lg.map) {
161 			int channel;
162 			nxge_grp_t *group = set->group[i];
163 			for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
164 				if ((1 << channel) & group->map) {
165 					if ((nxge_grp_dc_add(nxgep,
166 						(vr_handle_t)group,
167 						VP_BOUND_RX, channel)))
168 						return (NXGE_ERROR);
169 				}
170 			}
171 		}
172 		if (++count == set->lg.count)
173 			break;
174 	}
175 
176 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
177 
178 	return (NXGE_OK);
179 }
180 
181 nxge_status_t
182 nxge_init_rxdma_channel(p_nxge_t nxge, int channel)
183 {
184 	nxge_status_t status;
185 
186 	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel"));
187 
188 	status = nxge_map_rxdma(nxge, channel);
189 	if (status != NXGE_OK) {
190 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
191 		    "<== nxge_init_rxdma: status 0x%x", status));
192 		return (status);
193 	}
194 
195 	status = nxge_rxdma_hw_start(nxge, channel);
196 	if (status != NXGE_OK) {
197 		nxge_unmap_rxdma(nxge, channel);
198 	}
199 
200 	if (!nxge->statsp->rdc_ksp[channel])
201 		nxge_setup_rdc_kstats(nxge, channel);
202 
203 	NXGE_DEBUG_MSG((nxge, MEM2_CTL,
204 	    "<== nxge_init_rxdma_channel: status 0x%x", status));
205 
206 	return (status);
207 }
208 
209 void
210 nxge_uninit_rxdma_channels(p_nxge_t nxgep)
211 {
212 	nxge_grp_set_t *set = &nxgep->rx_set;
213 	int rdc;
214 
215 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels"));
216 
217 	if (set->owned.map == 0) {
218 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
219 		    "nxge_uninit_rxdma_channels: no channels"));
220 		return;
221 	}
222 
223 	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
224 		if ((1 << rdc) & set->owned.map) {
225 			nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc);
226 		}
227 	}
228 
229 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels"));
230 }
231 
232 void
233 nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel)
234 {
235 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel"));
236 
237 	if (nxgep->statsp->rdc_ksp[channel]) {
238 		kstat_delete(nxgep->statsp->rdc_ksp[channel]);
239 		nxgep->statsp->rdc_ksp[channel] = 0;
240 	}
241 
242 	nxge_rxdma_hw_stop(nxgep, channel);
243 	nxge_unmap_rxdma(nxgep, channel);
244 
245 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uinit_rxdma_channel"));
246 }
247 
248 nxge_status_t
249 nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
250 {
251 	npi_handle_t		handle;
252 	npi_status_t		rs = NPI_SUCCESS;
253 	nxge_status_t		status = NXGE_OK;
254 
255 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel"));
256 
257 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
258 	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
259 
260 	if (rs != NPI_SUCCESS) {
261 		status = NXGE_ERROR | rs;
262 	}
263 
264 	return (status);
265 }
266 
267 void
268 nxge_rxdma_regs_dump_channels(p_nxge_t nxgep)
269 {
270 	nxge_grp_set_t *set = &nxgep->rx_set;
271 	int rdc;
272 
273 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels"));
274 
275 	if (!isLDOMguest(nxgep)) {
276 		npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep);
277 		(void) npi_rxdma_dump_fzc_regs(handle);
278 	}
279 
280 	if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
281 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
282 		    "nxge_rxdma_regs_dump_channels: "
283 		    "NULL ring pointer(s)"));
284 		return;
285 	}
286 
287 	if (set->owned.map == 0) {
288 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
289 		    "nxge_rxdma_regs_dump_channels: no channels"));
290 		return;
291 	}
292 
293 	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
294 		if ((1 << rdc) & set->owned.map) {
295 			rx_rbr_ring_t *ring =
296 			    nxgep->rx_rbr_rings->rbr_rings[rdc];
297 			if (ring) {
298 				(void) nxge_dump_rxdma_channel(nxgep, rdc);
299 			}
300 		}
301 	}
302 
303 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump"));
304 }
305 
306 nxge_status_t
307 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel)
308 {
309 	npi_handle_t		handle;
310 	npi_status_t		rs = NPI_SUCCESS;
311 	nxge_status_t		status = NXGE_OK;
312 
313 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel"));
314 
315 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
316 	rs = npi_rxdma_dump_rdc_regs(handle, channel);
317 
318 	if (rs != NPI_SUCCESS) {
319 		status = NXGE_ERROR | rs;
320 	}
321 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel"));
322 	return (status);
323 }
324 
325 nxge_status_t
326 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
327     p_rx_dma_ent_msk_t mask_p)
328 {
329 	npi_handle_t		handle;
330 	npi_status_t		rs = NPI_SUCCESS;
331 	nxge_status_t		status = NXGE_OK;
332 
333 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
334 		"<== nxge_init_rxdma_channel_event_mask"));
335 
336 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
337 	rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p);
338 	if (rs != NPI_SUCCESS) {
339 		status = NXGE_ERROR | rs;
340 	}
341 
342 	return (status);
343 }
344 
345 nxge_status_t
346 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
347     p_rx_dma_ctl_stat_t cs_p)
348 {
349 	npi_handle_t		handle;
350 	npi_status_t		rs = NPI_SUCCESS;
351 	nxge_status_t		status = NXGE_OK;
352 
353 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
354 		"<== nxge_init_rxdma_channel_cntl_stat"));
355 
356 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
357 	rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p);
358 
359 	if (rs != NPI_SUCCESS) {
360 		status = NXGE_ERROR | rs;
361 	}
362 
363 	return (status);
364 }
365 
366 /*
367  * nxge_rxdma_cfg_rdcgrp_default_rdc
368  *
369  *	Set the default RDC for an RDC Group (Table)
370  *
371  * Arguments:
372  * 	nxgep
373  *	rdcgrp	The group to modify
374  *	rdc	The new default RDC.
375  *
376  * Notes:
377  *
378  * NPI/NXGE function calls:
379  *	npi_rxdma_cfg_rdc_table_default_rdc()
380  *
381  * Registers accessed:
382  *	RDC_TBL_REG: FZC_ZCP + 0x10000
383  *
384  * Context:
385  *	Service domain
386  */
387 nxge_status_t
388 nxge_rxdma_cfg_rdcgrp_default_rdc(
389 	p_nxge_t nxgep,
390 	uint8_t rdcgrp,
391 	uint8_t rdc)
392 {
393 	npi_handle_t		handle;
394 	npi_status_t		rs = NPI_SUCCESS;
395 	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
396 	p_nxge_rdc_grp_t	rdc_grp_p;
397 	uint8_t actual_rdcgrp, actual_rdc;
398 
399 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
400 			    " ==> nxge_rxdma_cfg_rdcgrp_default_rdc"));
401 	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
402 
403 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
404 
405 	/*
406 	 * This has to be rewritten.  Do we even allow this anymore?
407 	 */
408 	rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp];
409 	RDC_MAP_IN(rdc_grp_p->map, rdc);
410 	rdc_grp_p->def_rdc = rdc;
411 
412 	actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp);
413 	actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc);
414 
415 	rs = npi_rxdma_cfg_rdc_table_default_rdc(
416 		handle, actual_rdcgrp, actual_rdc);
417 
418 	if (rs != NPI_SUCCESS) {
419 		return (NXGE_ERROR | rs);
420 	}
421 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
422 			    " <== nxge_rxdma_cfg_rdcgrp_default_rdc"));
423 	return (NXGE_OK);
424 }
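
/*
 * Illustrative call (a sketch with hypothetical arguments): make
 * logical RDC 2 the default channel of RDC group 1, i.e. the channel
 * that receives packets which do not match any more specific
 * classification entry:
 *
 *	(void) nxge_rxdma_cfg_rdcgrp_default_rdc(nxgep, 1, 2);
 */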
425 
426 nxge_status_t
427 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc)
428 {
429 	npi_handle_t		handle;
430 
431 	uint8_t actual_rdc;
432 	npi_status_t		rs = NPI_SUCCESS;
433 
434 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
435 			    " ==> nxge_rxdma_cfg_port_default_rdc"));
436 
437 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
438 	actual_rdc = rdc;	/* XXX Hack! */
439 	rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc);
440 
441 
442 	if (rs != NPI_SUCCESS) {
443 		return (NXGE_ERROR | rs);
444 	}
445 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
446 			    " <== nxge_rxdma_cfg_port_default_rdc"));
447 
448 	return (NXGE_OK);
449 }
450 
451 nxge_status_t
452 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel,
453 				    uint16_t pkts)
454 {
455 	npi_status_t	rs = NPI_SUCCESS;
456 	npi_handle_t	handle;
457 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
458 			    " ==> nxge_rxdma_cfg_rcr_threshold"));
459 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
460 
461 	rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts);
462 
463 	if (rs != NPI_SUCCESS) {
464 		return (NXGE_ERROR | rs);
465 	}
466 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold"));
467 	return (NXGE_OK);
468 }
469 
470 nxge_status_t
471 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel,
472 			    uint16_t tout, uint8_t enable)
473 {
474 	npi_status_t	rs = NPI_SUCCESS;
475 	npi_handle_t	handle;
476 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout"));
477 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
478 	if (enable == 0) {
479 		rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel);
480 	} else {
481 		rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
482 							    tout);
483 	}
484 
485 	if (rs != NPI_SUCCESS) {
486 		return (NXGE_ERROR | rs);
487 	}
488 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout"));
489 	return (NXGE_OK);
490 }
491 
492 nxge_status_t
493 nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
494     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
495 {
496 	npi_handle_t		handle;
497 	rdc_desc_cfg_t 		rdc_desc;
498 	p_rcrcfig_b_t		cfgb_p;
499 	npi_status_t		rs = NPI_SUCCESS;
500 
501 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel"));
502 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
503 	/*
504 	 * Use configuration data composed at init time.
505 	 * Write to hardware the receive ring configurations.
506 	 */
507 	rdc_desc.mbox_enable = 1;
508 	rdc_desc.mbox_addr = mbox_p->mbox_addr;
509 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
510 		"==> nxge_enable_rxdma_channel: mboxp $%p($%p)",
511 		mbox_p->mbox_addr, rdc_desc.mbox_addr));
512 
513 	rdc_desc.rbr_len = rbr_p->rbb_max;
514 	rdc_desc.rbr_addr = rbr_p->rbr_addr;
515 
516 	switch (nxgep->rx_bksize_code) {
517 	case RBR_BKSIZE_4K:
518 		rdc_desc.page_size = SIZE_4KB;
519 		break;
520 	case RBR_BKSIZE_8K:
521 		rdc_desc.page_size = SIZE_8KB;
522 		break;
523 	case RBR_BKSIZE_16K:
524 		rdc_desc.page_size = SIZE_16KB;
525 		break;
526 	case RBR_BKSIZE_32K:
527 		rdc_desc.page_size = SIZE_32KB;
528 		break;
529 	}
530 
531 	rdc_desc.size0 = rbr_p->npi_pkt_buf_size0;
532 	rdc_desc.valid0 = 1;
533 
534 	rdc_desc.size1 = rbr_p->npi_pkt_buf_size1;
535 	rdc_desc.valid1 = 1;
536 
537 	rdc_desc.size2 = rbr_p->npi_pkt_buf_size2;
538 	rdc_desc.valid2 = 1;
539 
540 	rdc_desc.full_hdr = rcr_p->full_hdr_flag;
541 	rdc_desc.offset = rcr_p->sw_priv_hdr_len;
542 
543 	rdc_desc.rcr_len = rcr_p->comp_size;
544 	rdc_desc.rcr_addr = rcr_p->rcr_addr;
545 
546 	cfgb_p = &(rcr_p->rcr_cfgb);
547 	rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres;
548 	/* For now, disable this timeout in a guest domain. */
549 	if (isLDOMguest(nxgep)) {
550 		rdc_desc.rcr_timeout = 0;
551 		rdc_desc.rcr_timeout_enable = 0;
552 	} else {
553 		rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout;
554 		rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout;
555 	}
556 
557 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
558 		"rbr_len qlen %d pagesize code %d rcr_len %d",
559 		rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len));
560 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
561 		"size 0 %d size 1 %d size 2 %d",
562 		rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1,
563 		rbr_p->npi_pkt_buf_size2));
564 
565 	rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc);
566 	if (rs != NPI_SUCCESS) {
567 		return (NXGE_ERROR | rs);
568 	}
569 
570 	/*
571 	 * Enable the timeout and threshold.
572 	 */
573 	rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel,
574 			rdc_desc.rcr_threshold);
575 	if (rs != NPI_SUCCESS) {
576 		return (NXGE_ERROR | rs);
577 	}
578 
579 	rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
580 			rdc_desc.rcr_timeout);
581 	if (rs != NPI_SUCCESS) {
582 		return (NXGE_ERROR | rs);
583 	}
584 
585 	/* Enable the DMA */
586 	rs = npi_rxdma_cfg_rdc_enable(handle, channel);
587 	if (rs != NPI_SUCCESS) {
588 		return (NXGE_ERROR | rs);
589 	}
590 
591 	/* Kick the DMA engine. */
592 	npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max);
593 	/* Clear the rbr empty bit */
594 	(void) npi_rxdma_channel_rbr_empty_clear(handle, channel);
595 
596 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel"));
597 
598 	return (NXGE_OK);
599 }
600 
601 nxge_status_t
602 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
603 {
604 	npi_handle_t		handle;
605 	npi_status_t		rs = NPI_SUCCESS;
606 
607 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel"));
608 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
609 
610 	/* disable the DMA */
611 	rs = npi_rxdma_cfg_rdc_disable(handle, channel);
612 	if (rs != NPI_SUCCESS) {
613 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
614 			"<== nxge_disable_rxdma_channel:failed (0x%x)",
615 			rs));
616 		return (NXGE_ERROR | rs);
617 	}
618 
619 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel"));
620 	return (NXGE_OK);
621 }
622 
623 nxge_status_t
624 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel)
625 {
626 	npi_handle_t		handle;
627 	nxge_status_t		status = NXGE_OK;
628 
629 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
630 		"<== nxge_init_rxdma_channel_rcrflush"));
631 
632 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
633 	npi_rxdma_rdc_rcr_flush(handle, channel);
634 
635 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
636 		"<== nxge_init_rxdma_channel_rcrflsh"));
637 	return (status);
638 
639 }
640 
641 #define	MID_INDEX(l, r) ((r + l + 1) >> 1)
642 
643 #define	TO_LEFT -1
644 #define	TO_RIGHT 1
645 #define	BOTH_RIGHT (TO_RIGHT + TO_RIGHT)
646 #define	BOTH_LEFT (TO_LEFT + TO_LEFT)
647 #define	IN_MIDDLE (TO_RIGHT + TO_LEFT)
648 #define	NO_HINT 0xffffffff
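
/*
 * A minimal sketch (not part of the original driver; NXGE_EXAMPLE is a
 * hypothetical guard) of how the direction codes above combine in
 * nxge_rxbuf_pp_to_vp(): for a candidate chunk [base, base + size),
 * a packet address classifies as IN_MIDDLE (found), BOTH_LEFT (search
 * the lower half) or BOTH_RIGHT (search the upper half).
 */
#ifdef	NXGE_EXAMPLE
static int
nxge_chunk_classify(uint64_t pp, uint64_t base, uint32_t size)
{
	int base_side = (pp >= base) ? TO_RIGHT : TO_LEFT;
	int end_side = (pp < (base + size)) ? TO_LEFT : TO_RIGHT;

	/* IN_MIDDLE (0), BOTH_LEFT (-2) or BOTH_RIGHT (2) */
	return (base_side + end_side);
}
#endif	/* NXGE_EXAMPLE */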
649 
650 /*ARGSUSED*/
651 nxge_status_t
652 nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p,
653 	uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp,
654 	uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index)
655 {
656 	int			bufsize;
657 	uint64_t		pktbuf_pp;
658 	uint64_t 		dvma_addr;
659 	rxring_info_t 		*ring_info;
660 	int 			base_side, end_side;
661 	int 			r_index, l_index, anchor_index;
662 	int 			found, search_done;
663 	uint32_t offset, chunk_size, block_size, page_size_mask;
664 	uint32_t chunk_index, block_index, total_index;
665 	int 			max_iterations, iteration;
666 	rxbuf_index_info_t 	*bufinfo;
667 
668 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp"));
669 
670 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
671 		"==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
672 		pkt_buf_addr_pp,
673 		pktbufsz_type));
674 #if defined(__i386)
675 	pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp;
676 #else
677 	pktbuf_pp = (uint64_t)pkt_buf_addr_pp;
678 #endif
679 
680 	switch (pktbufsz_type) {
681 	case 0:
682 		bufsize = rbr_p->pkt_buf_size0;
683 		break;
684 	case 1:
685 		bufsize = rbr_p->pkt_buf_size1;
686 		break;
687 	case 2:
688 		bufsize = rbr_p->pkt_buf_size2;
689 		break;
690 	case RCR_SINGLE_BLOCK:
691 		bufsize = 0;
692 		anchor_index = 0;
693 		break;
694 	default:
695 		return (NXGE_ERROR);
696 	}
697 
698 	if (rbr_p->num_blocks == 1) {
699 		anchor_index = 0;
700 		ring_info = rbr_p->ring_info;
701 		bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
702 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
703 			"==> nxge_rxbuf_pp_to_vp: (found, 1 block) "
704 			"buf_pp $%p btype %d anchor_index %d "
705 			"bufinfo $%p",
706 			pkt_buf_addr_pp,
707 			pktbufsz_type,
708 			anchor_index,
709 			bufinfo));
710 
711 		goto found_index;
712 	}
713 
714 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
715 		"==> nxge_rxbuf_pp_to_vp: "
716 		"buf_pp $%p btype %d  anchor_index %d",
717 		pkt_buf_addr_pp,
718 		pktbufsz_type,
719 		anchor_index));
720 
721 	ring_info = rbr_p->ring_info;
722 	found = B_FALSE;
723 	bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
724 	iteration = 0;
725 	max_iterations = ring_info->max_iterations;
726 		/*
727 		 * First check if this block has been seen
728 		 * recently. This is indicated by a hint which
729 		 * is initialized when the first buffer of the block
730 		 * is seen. The hint is reset when the last buffer of
731 		 * the block has been processed.
732 		 * As three block sizes are supported, three hints
733 		 * are kept. The idea behind the hints is that once
734 		 * the hardware  uses a block for a buffer  of that
735 		 * size, it will use it exclusively for that size
736 		 * and will use it until it is exhausted. It is assumed
737 		 * that only a single block is in use for a given
738 		 * buffer size at any given time.
739 		 */
740 	if (ring_info->hint[pktbufsz_type] != NO_HINT) {
741 		anchor_index = ring_info->hint[pktbufsz_type];
742 		dvma_addr =  bufinfo[anchor_index].dvma_addr;
743 		chunk_size = bufinfo[anchor_index].buf_size;
744 		if ((pktbuf_pp >= dvma_addr) &&
745 			(pktbuf_pp < (dvma_addr + chunk_size))) {
746 			found = B_TRUE;
747 				/*
748 				 * Check if this is the last buffer in the
749 				 * block. If so, reset the hint for this size.
750 				 */
751 
752 			if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
753 				ring_info->hint[pktbufsz_type] = NO_HINT;
754 		}
755 	}
756 
757 	if (found == B_FALSE) {
758 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
759 			"==> nxge_rxbuf_pp_to_vp: (!found)"
760 			"buf_pp $%p btype %d anchor_index %d",
761 			pkt_buf_addr_pp,
762 			pktbufsz_type,
763 			anchor_index));
764 
765 			/*
766 			 * This is the first buffer of the block of this
767 			 * size. Need to search the whole information
768 			 * array.
769 			 * The search uses a binary search algorithm.
770 			 * It assumes that the information array is
771 			 * already sorted in increasing order:
772 			 * info[0] < info[1] < info[2] ... < info[n-1],
773 			 * where n is the size of the information array.
774 			 */
775 		r_index = rbr_p->num_blocks - 1;
776 		l_index = 0;
777 		search_done = B_FALSE;
778 		anchor_index = MID_INDEX(r_index, l_index);
779 		while (search_done == B_FALSE) {
780 			if ((r_index == l_index) ||
781 				(iteration >= max_iterations))
782 				search_done = B_TRUE;
783 			end_side = TO_RIGHT; /* to the right */
784 			base_side = TO_LEFT; /* to the left */
785 			/* read the DVMA address information and sort it */
786 			dvma_addr =  bufinfo[anchor_index].dvma_addr;
787 			chunk_size = bufinfo[anchor_index].buf_size;
788 			NXGE_DEBUG_MSG((nxgep, RX2_CTL,
789 				"==> nxge_rxbuf_pp_to_vp: (searching)"
790 				"buf_pp $%p btype %d "
791 				"anchor_index %d chunk_size %d dvmaaddr $%p",
792 				pkt_buf_addr_pp,
793 				pktbufsz_type,
794 				anchor_index,
795 				chunk_size,
796 				dvma_addr));
797 
798 			if (pktbuf_pp >= dvma_addr)
799 				base_side = TO_RIGHT; /* to the right */
800 			if (pktbuf_pp < (dvma_addr + chunk_size))
801 				end_side = TO_LEFT; /* to the left */
802 
803 			switch (base_side + end_side) {
804 				case IN_MIDDLE:
805 					/* found */
806 					found = B_TRUE;
807 					search_done = B_TRUE;
808 					if ((pktbuf_pp + bufsize) <
809 						(dvma_addr + chunk_size))
810 						ring_info->hint[pktbufsz_type] =
811 						bufinfo[anchor_index].buf_index;
812 					break;
813 				case BOTH_RIGHT:
814 						/* not found: go to the right */
815 					l_index = anchor_index + 1;
816 					anchor_index =
817 						MID_INDEX(r_index, l_index);
818 					break;
819 
820 				case  BOTH_LEFT:
821 						/* not found: go to the left */
822 					r_index = anchor_index - 1;
823 					anchor_index = MID_INDEX(r_index,
824 						l_index);
825 					break;
826 				default: /* should not come here */
827 					return (NXGE_ERROR);
828 			}
829 			iteration++;
830 		}
831 
832 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
833 			"==> nxge_rxbuf_pp_to_vp: (search done)"
834 			"buf_pp $%p btype %d anchor_index %d",
835 			pkt_buf_addr_pp,
836 			pktbufsz_type,
837 			anchor_index));
838 	}
839 
840 	if (found == B_FALSE) {
841 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
842 			"==> nxge_rxbuf_pp_to_vp: (search failed)"
843 			"buf_pp $%p btype %d anchor_index %d",
844 			pkt_buf_addr_pp,
845 			pktbufsz_type,
846 			anchor_index));
847 		return (NXGE_ERROR);
848 	}
849 
850 found_index:
851 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
852 		"==> nxge_rxbuf_pp_to_vp: (FOUND1)"
853 		"buf_pp $%p btype %d bufsize %d anchor_index %d",
854 		pkt_buf_addr_pp,
855 		pktbufsz_type,
856 		bufsize,
857 		anchor_index));
858 
859 	/* index of the first block in this chunk */
860 	chunk_index = bufinfo[anchor_index].start_index;
861 	dvma_addr =  bufinfo[anchor_index].dvma_addr;
862 	page_size_mask = ring_info->block_size_mask;
863 
864 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
865 		"==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)"
866 		"buf_pp $%p btype %d bufsize %d "
867 		"anchor_index %d chunk_index %d dvma $%p",
868 		pkt_buf_addr_pp,
869 		pktbufsz_type,
870 		bufsize,
871 		anchor_index,
872 		chunk_index,
873 		dvma_addr));
874 
875 	offset = pktbuf_pp - dvma_addr; /* offset within the chunk */
876 	block_size = rbr_p->block_size; /* System  block(page) size */
877 
878 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
879 		"==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)"
880 		"buf_pp $%p btype %d bufsize %d "
881 		"anchor_index %d chunk_index %d dvma $%p "
882 		"offset %d block_size %d",
883 		pkt_buf_addr_pp,
884 		pktbufsz_type,
885 		bufsize,
886 		anchor_index,
887 		chunk_index,
888 		dvma_addr,
889 		offset,
890 		block_size));
891 
892 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index"));
893 
894 	block_index = (offset / block_size); /* index within chunk */
895 	total_index = chunk_index + block_index;
896 
897 
898 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
899 		"==> nxge_rxbuf_pp_to_vp: "
900 		"total_index %d dvma_addr $%p "
901 		"offset %d block_size %d "
902 		"block_index %d ",
903 		total_index, dvma_addr,
904 		offset, block_size,
905 		block_index));
906 #if defined(__i386)
907 	*pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr +
908 		(uint32_t)offset);
909 #else
910 	*pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr +
911 		(uint64_t)offset);
912 #endif
913 
914 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
915 		"==> nxge_rxbuf_pp_to_vp: "
916 		"total_index %d dvma_addr $%p "
917 		"offset %d block_size %d "
918 		"block_index %d "
919 		"*pkt_buf_addr_p $%p",
920 		total_index, dvma_addr,
921 		offset, block_size,
922 		block_index,
923 		*pkt_buf_addr_p));
924 
925 
926 	*msg_index = total_index;
927 	*bufoffset =  (offset & page_size_mask);
928 
929 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
930 		"==> nxge_rxbuf_pp_to_vp: get msg index: "
931 		"msg_index %d bufoffset_index %d",
932 		*msg_index,
933 		*bufoffset));
934 
935 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp"));
936 
937 	return (NXGE_OK);
938 }
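
/*
 * Worked example of the index arithmetic above (hypothetical numbers):
 * for a chunk with dvma_addr 0x10000, start_index (chunk_index) 4 and
 * block_size 0x2000 (block_size_mask 0x1fff), a packet at pktbuf_pp
 * 0x15000 yields offset 0x5000, block_index 2, total_index 6, and
 * *bufoffset == (0x5000 & 0x1fff) == 0x1000.
 */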
939 
940 /*
941  * used by quick sort (qsort) function
942  * to perform comparison
943  */
944 static int
945 nxge_sort_compare(const void *p1, const void *p2)
946 {
947 
948 	rxbuf_index_info_t *a, *b;
949 
950 	a = (rxbuf_index_info_t *)p1;
951 	b = (rxbuf_index_info_t *)p2;
952 
953 	if (a->dvma_addr > b->dvma_addr)
954 		return (1);
955 	if (a->dvma_addr < b->dvma_addr)
956 		return (-1);
957 	return (0);
958 }
959 
960 
961 
962 /*
963  * grabbed this sort implementation from common/syscall/avl.c
964  *
965  */
966 /*
967  * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified.
968  * v = Ptr to array/vector of objs
969  * n = # objs in the array
970  * s = size of each obj (must be multiples of a word size)
971  * f = ptr to function to compare two objs
972  *	returns -1 (less than), 0 (equal), or 1 (greater than)
973  */
974 void
975 nxge_ksort(caddr_t v, int n, int s, int (*f)())
976 {
977 	int g, i, j, ii;
978 	unsigned int *p1, *p2;
979 	unsigned int tmp;
980 
981 	/* No work to do */
982 	if (v == NULL || n <= 1)
983 		return;
984 	/* Sanity check on arguments */
985 	ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0);
986 	ASSERT(s > 0);
987 
988 	for (g = n / 2; g > 0; g /= 2) {
989 		for (i = g; i < n; i++) {
990 			for (j = i - g; j >= 0 &&
991 				(*f)(v + j * s, v + (j + g) * s) == 1;
992 					j -= g) {
993 				p1 = (unsigned *)(v + j * s);
994 				p2 = (unsigned *)(v + (j + g) * s);
995 				for (ii = 0; ii < s / 4; ii++) {
996 					tmp = *p1;
997 					*p1++ = *p2;
998 					*p2++ = tmp;
999 				}
1000 			}
1001 		}
1002 	}
1003 }
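
/*
 * Illustrative use (a sketch, not driver code; NXGE_EXAMPLE is a
 * hypothetical guard): sort the per-chunk DVMA records in ascending
 * address order, exactly as nxge_rxbuf_index_info_init() does below.
 */
#ifdef	NXGE_EXAMPLE
static void
nxge_ksort_example(rxring_info_t *ring_info, int num_blocks)
{
	nxge_ksort((caddr_t)ring_info->buffer, num_blocks,
	    sizeof (rxbuf_index_info_t), nxge_sort_compare);
}
#endif	/* NXGE_EXAMPLE */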
1004 
1005 /*
1006  * Initialize data structures required for rxdma
1007  * buffer dvma->vmem address lookup
1008  */
1009 /*ARGSUSED*/
1010 static nxge_status_t
1011 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp)
1012 {
1013 
1014 	int index;
1015 	rxring_info_t *ring_info;
1016 	int max_iteration = 0, max_index = 0;
1017 
1018 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init"));
1019 
1020 	ring_info = rbrp->ring_info;
1021 	ring_info->hint[0] = NO_HINT;
1022 	ring_info->hint[1] = NO_HINT;
1023 	ring_info->hint[2] = NO_HINT;
1024 	max_index = rbrp->num_blocks;
1025 
1026 		/* read the DVMA address information and sort it */
1027 		/* do init of the information array */
1028 
1029 
1030 	NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
1031 		" nxge_rxbuf_index_info_init Sort ptrs"));
1032 
1033 		/* sort the array */
1034 	nxge_ksort((void *)ring_info->buffer, max_index,
1035 		sizeof (rxbuf_index_info_t), nxge_sort_compare);
1036 
1037 
1038 
1039 	for (index = 0; index < max_index; index++) {
1040 		NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
1041 			" nxge_rxbuf_index_info_init: sorted chunk %d "
1042 			" ioaddr $%p kaddr $%p size %x",
1043 			index, ring_info->buffer[index].dvma_addr,
1044 			ring_info->buffer[index].kaddr,
1045 			ring_info->buffer[index].buf_size));
1046 	}
1047 
1048 	max_iteration = 0;
1049 	while (max_index >= (1ULL << max_iteration))
1050 		max_iteration++;
1051 	ring_info->max_iterations = max_iteration + 1;
1052 	NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
1053 		" nxge_rxbuf_index_info_init Find max iter %d",
1054 					ring_info->max_iterations));
1055 
1056 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init"));
1057 	return (NXGE_OK);
1058 }
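
/*
 * Worked example (hypothetical): with num_blocks == 8, the loop above
 * runs while 8 >= 1, 2, 4 and 8, leaving max_iteration == 4, so
 * max_iterations == 5: enough probes to bisect 8 sorted chunks, plus
 * one as a safety margin.
 */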
1059 
1060 /* ARGSUSED */
1061 void
1062 nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p)
1063 {
1064 #ifdef	NXGE_DEBUG
1065 
1066 	uint32_t bptr;
1067 	uint64_t pp;
1068 
1069 	bptr = entry_p->bits.hdw.pkt_buf_addr;
1070 
1071 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1072 		"\trcr entry $%p "
1073 		"\trcr entry 0x%0llx "
1074 		"\trcr entry 0x%08x "
1075 		"\trcr entry 0x%08x "
1076 		"\tvalue 0x%0llx\n"
1077 		"\tmulti = %d\n"
1078 		"\tpkt_type = 0x%x\n"
1079 		"\tzero_copy = %d\n"
1080 		"\tnoport = %d\n"
1081 		"\tpromis = %d\n"
1082 		"\terror = 0x%04x\n"
1083 		"\tdcf_err = 0x%01x\n"
1084 		"\tl2_len = %d\n"
1085 		"\tpktbufsize = %d\n"
1086 		"\tpkt_buf_addr = $%p\n"
1087 		"\tpkt_buf_addr (<< 6) = $%p\n",
1088 		entry_p,
1089 		*(int64_t *)entry_p,
1090 		*(int32_t *)entry_p,
1091 		*(int32_t *)((char *)entry_p + 32),
1092 		entry_p->value,
1093 		entry_p->bits.hdw.multi,
1094 		entry_p->bits.hdw.pkt_type,
1095 		entry_p->bits.hdw.zero_copy,
1096 		entry_p->bits.hdw.noport,
1097 		entry_p->bits.hdw.promis,
1098 		entry_p->bits.hdw.error,
1099 		entry_p->bits.hdw.dcf_err,
1100 		entry_p->bits.hdw.l2_len,
1101 		entry_p->bits.hdw.pktbufsz,
1102 		bptr,
1103 		entry_p->bits.ldw.pkt_buf_addr));
1104 
1105 	pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) <<
1106 		RCR_PKT_BUF_ADDR_SHIFT;
1107 
1108 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d",
1109 		pp, (*(int64_t *)entry_p >> 40) & 0x3fff));
1110 #endif
1111 }
1112 
1113 void
1114 nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc)
1115 {
1116 	npi_handle_t		handle;
1117 	rbr_stat_t 		rbr_stat;
1118 	addr44_t 		hd_addr;
1119 	addr44_t 		tail_addr;
1120 	uint16_t 		qlen;
1121 
1122 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1123 		"==> nxge_rxdma_regs_dump: rdc channel %d", rdc));
1124 
1125 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1126 
1127 	/* RBR head */
1128 	hd_addr.addr = 0;
1129 	(void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr);
1130 #if defined(__i386)
1131 	printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
1132 		(void *)(uint32_t)hd_addr.addr);
1133 #else
1134 	printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
1135 		(void *)hd_addr.addr);
1136 #endif
1137 
1138 	/* RBR stats */
1139 	(void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat);
1140 	printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen);
1141 
1142 	/* RCR tail */
1143 	tail_addr.addr = 0;
1144 	(void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr);
1145 #if defined(__i386)
1146 	printf("nxge_rxdma_regs_dump: got tail ptr $%p \n",
1147 		(void *)(uint32_t)tail_addr.addr);
1148 #else
1149 	printf("nxge_rxdma_regs_dump: got tail ptr $%p \n",
1150 		(void *)tail_addr.addr);
1151 #endif
1152 
1153 	/* RCR qlen */
1154 	(void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen);
1155 	printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen);
1156 
1157 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1158 		"<== nxge_rxdma_regs_dump: rdc rdc %d", rdc));
1159 }
1160 
1161 void
1162 nxge_rxdma_stop(p_nxge_t nxgep)
1163 {
1164 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop"));
1165 
1166 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
1167 	(void) nxge_rx_mac_disable(nxgep);
1168 	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
1169 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop"));
1170 }
1171 
1172 void
1173 nxge_rxdma_stop_reinit(p_nxge_t nxgep)
1174 {
1175 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_reinit"));
1176 
1177 	(void) nxge_rxdma_stop(nxgep);
1178 	(void) nxge_uninit_rxdma_channels(nxgep);
1179 	(void) nxge_init_rxdma_channels(nxgep);
1180 
1181 #ifndef	AXIS_DEBUG_LB
1182 	(void) nxge_xcvr_init(nxgep);
1183 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
1184 #endif
1185 	(void) nxge_rx_mac_enable(nxgep);
1186 
1187 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_reinit"));
1188 }
1189 
1190 nxge_status_t
1191 nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
1192 {
1193 	nxge_grp_set_t *set = &nxgep->rx_set;
1194 	nxge_status_t status;
1195 	npi_status_t rs;
1196 	int rdc;
1197 
1198 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1199 		"==> nxge_rxdma_hw_mode: mode %d", enable));
1200 
1201 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1202 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
1203 		    "<== nxge_rxdma_mode: not initialized"));
1204 		return (NXGE_ERROR);
1205 	}
1206 
1207 	if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
1208 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1209 		    "<== nxge_tx_port_fatal_err_recover: "
1210 		    "NULL ring pointer(s)"));
1211 		return (NXGE_ERROR);
1212 	}
1213 
1214 	if (set->owned.map == 0) {
1215 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
1216 		    "nxge_rxdma_regs_dump_channels: no channels"));
1217 		return (NULL);
1218 	}
1219 
1220 	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1221 		if ((1 << rdc) & set->owned.map) {
1222 			rx_rbr_ring_t *ring =
1223 			    nxgep->rx_rbr_rings->rbr_rings[rdc];
1224 			npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep);
1225 			if (ring) {
1226 				if (enable) {
1227 					NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1228 					    "==> nxge_rxdma_hw_mode: "
1229 					    "channel %d (enable)", rdc));
1230 					rs = npi_rxdma_cfg_rdc_enable
1231 					    (handle, rdc);
1232 				} else {
1233 					NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1234 					    "==> nxge_rxdma_hw_mode: "
1235 					    "channel %d disable)", rdc));
1236 					rs = npi_rxdma_cfg_rdc_disable
1237 					    (handle, rdc);
1238 				}
1239 			}
1240 		}
1241 	}
1242 
1243 	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1244 
1245 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1246 		"<== nxge_rxdma_hw_mode: status 0x%x", status));
1247 
1248 	return (status);
1249 }
1250 
1251 void
1252 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
1253 {
1254 	npi_handle_t		handle;
1255 
1256 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1257 		"==> nxge_rxdma_enable_channel: channel %d", channel));
1258 
1259 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1260 	(void) npi_rxdma_cfg_rdc_enable(handle, channel);
1261 
1262 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel"));
1263 }
1264 
1265 void
1266 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
1267 {
1268 	npi_handle_t		handle;
1269 
1270 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1271 		"==> nxge_rxdma_disable_channel: channel %d", channel));
1272 
1273 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1274 	(void) npi_rxdma_cfg_rdc_disable(handle, channel);
1275 
1276 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel"));
1277 }
1278 
1279 void
1280 nxge_hw_start_rx(p_nxge_t nxgep)
1281 {
1282 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx"));
1283 
1284 	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
1285 	(void) nxge_rx_mac_enable(nxgep);
1286 
1287 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx"));
1288 }
1289 
1290 /*ARGSUSED*/
1291 void
1292 nxge_fixup_rxdma_rings(p_nxge_t nxgep)
1293 {
1294 	nxge_grp_set_t *set = &nxgep->rx_set;
1295 	int rdc;
1296 
1297 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings"));
1298 
1299 	if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
1300 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1301 		    "<== nxge_tx_port_fatal_err_recover: "
1302 		    "NULL ring pointer(s)"));
1303 		return;
1304 	}
1305 
1306 	if (set->owned.map == 0) {
1307 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
1308 		    "nxge_rxdma_regs_dump_channels: no channels"));
1309 		return;
1310 	}
1311 
1312 	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1313 		if ((1 << rdc) & set->owned.map) {
1314 			rx_rbr_ring_t *ring =
1315 			    nxgep->rx_rbr_rings->rbr_rings[rdc];
1316 			if (ring) {
1317 				nxge_rxdma_hw_stop(nxgep, rdc);
1318 				NXGE_DEBUG_MSG((nxgep, RX_CTL,
1319 					"==> nxge_fixup_rxdma_rings: "
1320 					"channel %d ring $%px",
1321 					rdc, ring));
1322 				(void) nxge_rxdma_fixup_channel
1323 				    (nxgep, rdc, rdc);
1324 			}
1325 		}
1326 	}
1327 
1328 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings"));
1329 }
1330 
1331 void
1332 nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
1333 {
1334 	int		i;
1335 
1336 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel"));
1337 	i = nxge_rxdma_get_ring_index(nxgep, channel);
1338 	if (i < 0) {
1339 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
1340 			"<== nxge_rxdma_fix_channel: no entry found"));
1341 		return;
1342 	}
1343 
1344 	nxge_rxdma_fixup_channel(nxgep, channel, i);
1345 
1346 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fix_channel"));
1347 }
1348 
1349 void
1350 nxge_rxdma_fixup_channel(p_nxge_t nxgep, uint16_t channel, int entry)
1351 {
1352 	int			ndmas;
1353 	p_rx_rbr_rings_t 	rx_rbr_rings;
1354 	p_rx_rbr_ring_t		*rbr_rings;
1355 	p_rx_rcr_rings_t 	rx_rcr_rings;
1356 	p_rx_rcr_ring_t		*rcr_rings;
1357 	p_rx_mbox_areas_t 	rx_mbox_areas_p;
1358 	p_rx_mbox_t		*rx_mbox_p;
1359 	p_nxge_dma_pool_t	dma_buf_poolp;
1360 	p_nxge_dma_pool_t	dma_cntl_poolp;
1361 	p_rx_rbr_ring_t 	rbrp;
1362 	p_rx_rcr_ring_t 	rcrp;
1363 	p_rx_mbox_t 		mboxp;
1364 	p_nxge_dma_common_t 	dmap;
1365 	nxge_status_t		status = NXGE_OK;
1366 
1367 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fixup_channel"));
1368 
1369 	(void) nxge_rxdma_stop_channel(nxgep, channel);
1370 
1371 	dma_buf_poolp = nxgep->rx_buf_pool_p;
1372 	dma_cntl_poolp = nxgep->rx_cntl_pool_p;
1373 
1374 	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
1375 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1376 			"<== nxge_rxdma_fixup_channel: buf not allocated"));
1377 		return;
1378 	}
1379 
1380 	ndmas = dma_buf_poolp->ndmas;
1381 	if (!ndmas) {
1382 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1383 			"<== nxge_rxdma_fixup_channel: no dma allocated"));
1384 		return;
1385 	}
1386 
1387 	rx_rbr_rings = nxgep->rx_rbr_rings;
1388 	rx_rcr_rings = nxgep->rx_rcr_rings;
1389 	rbr_rings = rx_rbr_rings->rbr_rings;
1390 	rcr_rings = rx_rcr_rings->rcr_rings;
1391 	rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
1392 	rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
1393 
1394 	/* Reinitialize the receive block and completion rings */
1395 	rbrp = (p_rx_rbr_ring_t)rbr_rings[entry];
1396 	rcrp = (p_rx_rcr_ring_t)rcr_rings[entry];
1397 	mboxp = (p_rx_mbox_t)rx_mbox_p[entry];
1398 
1399 
1400 	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
1401 	rbrp->rbr_rd_index = 0;
1402 	rcrp->comp_rd_index = 0;
1403 	rcrp->comp_wt_index = 0;
1404 
1405 	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
1406 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
1407 
1408 	status = nxge_rxdma_start_channel(nxgep, channel,
1409 			rbrp, rcrp, mboxp);
1410 	if (status != NXGE_OK) {
1411 		goto nxge_rxdma_fixup_channel_fail;
1412 	}
1413 
1414 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel"));
1415 	return;
1416 
1417 nxge_rxdma_fixup_channel_fail:
1418 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1419 		"==> nxge_rxdma_fixup_channel: failed (0x%08x)", status));
1420 
1421 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel"));
1422 }
1423 
1424 /* ARGSUSED */
1425 int
1426 nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel)
1427 {
1428 	return (channel);
1429 }
1430 
1431 p_rx_rbr_ring_t
1432 nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel)
1433 {
1434 	nxge_grp_set_t *set = &nxgep->rx_set;
1435 	nxge_channel_t rdc;
1436 
1437 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1438 		"==> nxge_rxdma_get_rbr_ring: channel %d", channel));
1439 
1440 	if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
1441 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1442 		    "<== nxge_rxdma_get_rbr_ring: "
1443 		    "NULL ring pointer(s)"));
1444 		return (NULL);
1445 	}
1446 
1447 	if (set->owned.map == 0) {
1448 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
1449 		    "<== nxge_rxdma_get_rbr_ring: no channels"));
1450 		return (NULL);
1451 	}
1452 
1453 	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1454 		if ((1 << rdc) & set->owned.map) {
1455 			rx_rbr_ring_t *ring =
1456 			    nxgep->rx_rbr_rings->rbr_rings[rdc];
1457 			if (ring) {
1458 				if (channel == ring->rdc) {
1459 					NXGE_DEBUG_MSG((nxgep, RX_CTL,
1460 					    "==> nxge_rxdma_get_rbr_ring: "
1461 					    "channel %d ring $%p", rdc, ring));
1462 					return (ring);
1463 				}
1464 			}
1465 		}
1466 	}
1467 
1468 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1469 		"<== nxge_rxdma_get_rbr_ring: not found"));
1470 
1471 	return (NULL);
1472 }
1473 
1474 p_rx_rcr_ring_t
1475 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel)
1476 {
1477 	nxge_grp_set_t *set = &nxgep->rx_set;
1478 	nxge_channel_t rdc;
1479 
1480 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1481 		"==> nxge_rxdma_get_rcr_ring: channel %d", channel));
1482 
1483 	if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) {
1484 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1485 		    "<== nxge_rxdma_get_rcr_ring: "
1486 		    "NULL ring pointer(s)"));
1487 		return (NULL);
1488 	}
1489 
1490 	if (set->owned.map == 0) {
1491 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
1492 		    "<== nxge_rxdma_get_rbr_ring: no channels"));
1493 		return (NULL);
1494 	}
1495 
1496 	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1497 		if ((1 << rdc) & set->owned.map) {
1498 			rx_rcr_ring_t *ring =
1499 			    nxgep->rx_rcr_rings->rcr_rings[rdc];
1500 			if (ring) {
1501 				if (channel == ring->rdc) {
1502 					NXGE_DEBUG_MSG((nxgep, RX_CTL,
1503 					    "==> nxge_rxdma_get_rcr_ring: "
1504 					    "channel %d ring $%p", rdc, ring));
1505 					return (ring);
1506 				}
1507 			}
1508 		}
1509 	}
1510 
1511 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1512 		"<== nxge_rxdma_get_rcr_ring: not found"));
1513 
1514 	return (NULL);
1515 }
1516 
1517 /*
1518  * Static functions start here.
1519  */
1520 static p_rx_msg_t
1521 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p)
1522 {
1523 	p_rx_msg_t nxge_mp 		= NULL;
1524 	p_nxge_dma_common_t		dmamsg_p;
1525 	uchar_t 			*buffer;
1526 
1527 	nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP);
1528 	if (nxge_mp == NULL) {
1529 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
1530 			"Allocation of a rx msg failed."));
1531 		goto nxge_allocb_exit;
1532 	}
1533 
1534 	nxge_mp->use_buf_pool = B_FALSE;
1535 	if (dmabuf_p) {
1536 		nxge_mp->use_buf_pool = B_TRUE;
1537 		dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma;
1538 		*dmamsg_p = *dmabuf_p;
1539 		dmamsg_p->nblocks = 1;
1540 		dmamsg_p->block_size = size;
1541 		dmamsg_p->alength = size;
1542 		buffer = (uchar_t *)dmabuf_p->kaddrp;
1543 
1544 		dmabuf_p->kaddrp = (void *)
1545 				((char *)dmabuf_p->kaddrp + size);
1546 		dmabuf_p->ioaddr_pp = (void *)
1547 				((char *)dmabuf_p->ioaddr_pp + size);
1548 		dmabuf_p->alength -= size;
1549 		dmabuf_p->offset += size;
1550 		dmabuf_p->dma_cookie.dmac_laddress += size;
1551 		dmabuf_p->dma_cookie.dmac_size -= size;
1552 
1553 	} else {
1554 		buffer = KMEM_ALLOC(size, KM_NOSLEEP);
1555 		if (buffer == NULL) {
1556 			NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
1557 				"Allocation of a receive page failed."));
1558 			goto nxge_allocb_fail1;
1559 		}
1560 	}
1561 
1562 	nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb);
1563 	if (nxge_mp->rx_mblk_p == NULL) {
1564 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed."));
1565 		goto nxge_allocb_fail2;
1566 	}
1567 
1568 	nxge_mp->buffer = buffer;
1569 	nxge_mp->block_size = size;
1570 	nxge_mp->freeb.free_func = (void (*)())nxge_freeb;
1571 	nxge_mp->freeb.free_arg = (caddr_t)nxge_mp;
1572 	nxge_mp->ref_cnt = 1;
1573 	nxge_mp->free = B_TRUE;
1574 	nxge_mp->rx_use_bcopy = B_FALSE;
1575 
1576 	atomic_inc_32(&nxge_mblks_pending);
1577 
1578 	goto nxge_allocb_exit;
1579 
1580 nxge_allocb_fail2:
1581 	if (!nxge_mp->use_buf_pool) {
1582 		KMEM_FREE(buffer, size);
1583 	}
1584 
1585 nxge_allocb_fail1:
1586 	KMEM_FREE(nxge_mp, sizeof (rx_msg_t));
1587 	nxge_mp = NULL;
1588 
1589 nxge_allocb_exit:
1590 	return (nxge_mp);
1591 }
1592 
1593 p_mblk_t
1594 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
1595 {
1596 	p_mblk_t mp;
1597 
1598 	NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb"));
1599 	NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p "
1600 		"offset = 0x%08X "
1601 		"size = 0x%08X",
1602 		nxge_mp, offset, size));
1603 
1604 	mp = desballoc(&nxge_mp->buffer[offset], size,
1605 				0, &nxge_mp->freeb);
1606 	if (mp == NULL) {
1607 		NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
1608 		goto nxge_dupb_exit;
1609 	}
1610 	atomic_inc_32(&nxge_mp->ref_cnt);
1611 
1612 
1613 nxge_dupb_exit:
1614 	NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p",
1615 		nxge_mp));
1616 	return (mp);
1617 }
1618 
1619 p_mblk_t
1620 nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
1621 {
1622 	p_mblk_t mp;
1623 	uchar_t *dp;
1624 
1625 	mp = allocb(size + NXGE_RXBUF_EXTRA, 0);
1626 	if (mp == NULL) {
1627 		NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
1628 		goto nxge_dupb_bcopy_exit;
1629 	}
1630 	dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA;
1631 	bcopy((void *)&nxge_mp->buffer[offset], dp, size);
1632 	mp->b_wptr = dp + size;
1633 
1634 nxge_dupb_bcopy_exit:
1635 	NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p",
1636 		nxge_mp));
1637 	return (mp);
1638 }
1639 
1640 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p,
1641 	p_rx_msg_t rx_msg_p);
1642 
1643 void
1644 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p)
1645 {
1646 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page"));
1647 
1648 	/* Reuse this buffer */
1649 	rx_msg_p->free = B_FALSE;
1650 	rx_msg_p->cur_usage_cnt = 0;
1651 	rx_msg_p->max_usage_cnt = 0;
1652 	rx_msg_p->pkt_buf_size = 0;
1653 
1654 	if (rx_rbr_p->rbr_use_bcopy) {
1655 		rx_msg_p->rx_use_bcopy = B_FALSE;
1656 		atomic_dec_32(&rx_rbr_p->rbr_consumed);
1657 	}
1658 
1659 	/*
1660 	 * Get the rbr header pointer and its offset index.
1661 	 */
1662 	MUTEX_ENTER(&rx_rbr_p->post_lock);
1663 	rx_rbr_p->rbr_wr_index =  ((rx_rbr_p->rbr_wr_index + 1) &
1664 					    rx_rbr_p->rbr_wrap_mask);
1665 	rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr;
1666 	MUTEX_EXIT(&rx_rbr_p->post_lock);
1667 	npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep),
1668 	    rx_rbr_p->rdc, 1);
1669 
1670 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1671 		"<== nxge_post_page (channel %d post_next_index %d)",
1672 		rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index));
1673 
1674 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page"));
1675 }
1676 
1677 void
1678 nxge_freeb(p_rx_msg_t rx_msg_p)
1679 {
1680 	size_t size;
1681 	uchar_t *buffer = NULL;
1682 	int ref_cnt;
1683 	boolean_t free_state = B_FALSE;
1684 
1685 	rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p;
1686 
1687 	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb"));
1688 	NXGE_DEBUG_MSG((NULL, MEM2_CTL,
1689 		"nxge_freeb:rx_msg_p = $%p (block pending %d)",
1690 		rx_msg_p, nxge_mblks_pending));
1691 
1692 	/*
1693 	 * First we need to get the free state, then
1694 	 * atomically decrement the reference count to prevent
1695 	 * the race condition with the interrupt thread that
1696 	 * is processing a loaned up buffer block.
1697 	 */
1698 	free_state = rx_msg_p->free;
1699 	ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1);
1700 	if (!ref_cnt) {
1701 		atomic_dec_32(&nxge_mblks_pending);
1702 		buffer = rx_msg_p->buffer;
1703 		size = rx_msg_p->block_size;
1704 		NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: "
1705 			"will free: rx_msg_p = $%p (block pending %d)",
1706 			rx_msg_p, nxge_mblks_pending));
1707 
1708 		if (!rx_msg_p->use_buf_pool) {
1709 			KMEM_FREE(buffer, size);
1710 		}
1711 
1712 		KMEM_FREE(rx_msg_p, sizeof (rx_msg_t));
1713 
1714 		if (ring) {
1715 			/*
1716 			 * Decrement the receive buffer ring's reference
1717 			 * count, too.
1718 			 */
1719 			atomic_dec_32(&ring->rbr_ref_cnt);
1720 
1721 			/*
1722 			 * Free the receive buffer ring, if
1723 			 * 1. all the receive buffers have been freed
1724 			 * 2. and we are in the proper state (that is,
1725 			 *    we are not UNMAPPING).
1726 			 */
1727 			if (ring->rbr_ref_cnt == 0 &&
1728 			    ring->rbr_state == RBR_UNMAPPED) {
1729 				/*
1730 				 * Free receive data buffers,
1731 				 * buffer index information
1732 				 * (rxring_info) and
1733 				 * the message block ring.
1734 				 */
1735 				NXGE_DEBUG_MSG((NULL, RX_CTL,
1736 				    "nxge_freeb:rx_msg_p = $%p "
1737 				    "(block pending %d) free buffers",
1738 				    rx_msg_p, nxge_mblks_pending));
1739 				nxge_rxdma_databuf_free(ring);
1740 				if (ring->ring_info) {
1741 					KMEM_FREE(ring->ring_info,
1742 					    sizeof (rxring_info_t));
1743 				}
1744 
1745 				if (ring->rx_msg_ring) {
1746 					KMEM_FREE(ring->rx_msg_ring,
1747 					    ring->tnblocks *
1748 					    sizeof (p_rx_msg_t));
1749 				}
1750 				KMEM_FREE(ring, sizeof (*ring));
1751 			}
1752 		}
1753 		return;
1754 	}
1755 
1756 	/*
1757 	 * Repost buffer.
1758 	 */
1759 	if (free_state && (ref_cnt == 1) && ring) {
1760 		NXGE_DEBUG_MSG((NULL, RX_CTL,
1761 		    "nxge_freeb: post page $%p:", rx_msg_p));
1762 		if (ring->rbr_state == RBR_POSTING)
1763 			nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p);
1764 	}
1765 
1766 	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb"));
1767 }
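
/*
 * Buffer lifecycle implemented by nxge_allocb()/nxge_dupb()/nxge_freeb()
 * (a summary of the code above, not new behavior): ref_cnt starts at 1
 * when the buffer is created; nxge_dupb() increments it for each mblk
 * loaned upstream; freeb() of a loaned mblk invokes nxge_freeb(), which
 * decrements it. A drop to 1 with rx_msg_p->free set reposts the page
 * to the RBR; a drop to 0 releases the rx_msg_t (and possibly the whole
 * ring, as above).
 */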
1768 
1769 uint_t
1770 nxge_rx_intr(void *arg1, void *arg2)
1771 {
1772 	p_nxge_ldv_t		ldvp = (p_nxge_ldv_t)arg1;
1773 	p_nxge_t		nxgep = (p_nxge_t)arg2;
1774 	p_nxge_ldg_t		ldgp;
1775 	uint8_t			channel;
1776 	npi_handle_t		handle;
1777 	rx_dma_ctl_stat_t	cs;
1778 
1779 #ifdef	NXGE_DEBUG
1780 	rxdma_cfig1_t		cfg;
1781 #endif
1782 	uint_t 			serviced = DDI_INTR_UNCLAIMED;
1783 
1784 	if (ldvp == NULL) {
1785 		NXGE_DEBUG_MSG((NULL, INT_CTL,
1786 			"<== nxge_rx_intr: arg2 $%p arg1 $%p",
1787 			nxgep, ldvp));
1788 
1789 		return (DDI_INTR_CLAIMED);
1790 	}
1791 
1792 	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
1793 		nxgep = ldvp->nxgep;
1794 	}
1795 
1796 	if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
1797 	    (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
1798 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
1799 		    "<== nxge_rx_intr: interface not started or intialized"));
1800 		return (DDI_INTR_CLAIMED);
1801 	}
1802 
1803 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1804 		"==> nxge_rx_intr: arg2 $%p arg1 $%p",
1805 		nxgep, ldvp));
1806 
1807 	/*
1808 	 * This interrupt handler is for a specific
1809 	 * receive dma channel.
1810 	 */
1811 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1812 	/*
1813 	 * Get the control and status for this channel.
1814 	 */
1815 	channel = ldvp->channel;
1816 	ldgp = ldvp->ldgp;
1817 	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value);
1818 
1819 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d "
1820 		"cs 0x%016llx rcrto 0x%x rcrthres %x",
1821 		channel,
1822 		cs.value,
1823 		cs.bits.hdw.rcrto,
1824 		cs.bits.hdw.rcrthres));
1825 
1826 	nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, cs);
1827 	serviced = DDI_INTR_CLAIMED;
1828 
1829 	/* error events. */
1830 	if (cs.value & RX_DMA_CTL_STAT_ERROR) {
1831 		(void) nxge_rx_err_evnts(nxgep, channel, cs);
1832 	}
1833 
1834 nxge_intr_exit:
1835 	/*
1836 	 * Enable the mailbox update interrupt if we want
1837 	 * to use mailbox. We probably don't need to use
1838 	 * mailbox as it only saves us one pio read.
1839 	 * Also write 1 to rcrthres and rcrto to clear
1840 	 * these two edge triggered bits.
1841 	 */
1842 
1843 	cs.value &= RX_DMA_CTL_STAT_WR1C;
1844 	cs.bits.hdw.mex = 1;
1845 	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
1846 			cs.value);
1847 
1848 	/*
1849 	 * Rearm this logical group if this is a single device
1850 	 * group.
1851 	 */
1852 	if (ldgp->nldvs == 1) {
1853 		ldgimgm_t		mgm;
1854 		mgm.value = 0;
1855 		mgm.bits.ldw.arm = 1;
1856 		mgm.bits.ldw.timer = ldgp->ldg_timer;
1857 		if (isLDOMguest(nxgep)) {
1858 			nxge_hio_ldgimgn(nxgep, ldgp);
1859 		} else {
1860 			NXGE_REG_WR64(handle,
1861 			    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
1862 			    mgm.value);
1863 		}
1864 	}
1865 
1866 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: serviced %d",
1867 		serviced));
1868 	return (serviced);
1869 }
1870 
1871 /*
1872  * Process the packets received in the specified logical device
1873  * and pass up a chain of message blocks to the upper layer.
1874  */
1875 static void
1876 nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, rx_dma_ctl_stat_t cs)
1877 {
1878 	p_mblk_t		mp;
1879 	p_rx_rcr_ring_t		rcrp;
1880 
1881 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring"));
1882 	rcrp = nxgep->rx_rcr_rings->rcr_rings[vindex];
1883 	if (rcrp->poll_flag) {
1884 		/* It is in the poll mode */
1885 		return;
1886 	}
1887 
1888 	if ((mp = nxge_rx_pkts(nxgep, rcrp, cs, -1)) == NULL) {
1889 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
1890 			"<== nxge_rx_pkts_vring: no mp"));
1891 		return;
1892 	}
1893 
1894 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p",
1895 		mp));
1896 
1897 #ifdef  NXGE_DEBUG
1898 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
1899 			"==> nxge_rx_pkts_vring:calling mac_rx "
1900 			"LEN %d mp $%p mp->b_cont $%p mp->b_next $%p rcrp $%p "
1901 			"mac_handle $%p",
1902 			mp->b_wptr - mp->b_rptr,
1903 			mp, mp->b_cont, mp->b_next,
1904 			rcrp, rcrp->rcr_mac_handle));
1905 
1906 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
1907 			"==> nxge_rx_pkts_vring: dump packets "
1908 			"(mp $%p b_rptr $%p b_wptr $%p):\n %s",
1909 			mp,
1910 			mp->b_rptr,
1911 			mp->b_wptr,
1912 			nxge_dump_packet((char *)mp->b_rptr,
1913 			mp->b_wptr - mp->b_rptr)));
1914 		if (mp->b_cont) {
1915 			NXGE_DEBUG_MSG((nxgep, RX_CTL,
1916 				"==> nxge_rx_pkts_vring: dump b_cont packets "
1917 				"(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s",
1918 				mp->b_cont,
1919 				mp->b_cont->b_rptr,
1920 				mp->b_cont->b_wptr,
1921 				nxge_dump_packet((char *)mp->b_cont->b_rptr,
1922 				mp->b_cont->b_wptr - mp->b_cont->b_rptr)));
1923 		}
1924 		if (mp->b_next) {
1925 			NXGE_DEBUG_MSG((nxgep, RX_CTL,
1926 				"==> nxge_rx_pkts_vring: dump next packets "
1927 				"(b_rptr $%p): %s",
1928 				mp->b_next->b_rptr,
1929 				nxge_dump_packet((char *)mp->b_next->b_rptr,
1930 				mp->b_next->b_wptr - mp->b_next->b_rptr)));
1931 		}
1932 #endif
1933 
1934 	if (!isLDOMguest(nxgep))
1935 		mac_rx(nxgep->mach, rcrp->rcr_mac_handle, mp);
1936 #if defined(sun4v)
1937 	else {			/* isLDOMguest(nxgep) */
1938 		nxge_hio_data_t *nhd = (nxge_hio_data_t *)
1939 		    nxgep->nxge_hw_p->hio;
1940 		nx_vio_fp_t *vio = &nhd->hio.vio;
1941 
1942 		if (vio->cb.vio_net_rx_cb) {
1943 			(*vio->cb.vio_net_rx_cb)
1944 			    (nxgep->hio_vr->vhp, mp);
1945 		}
1946 	}
1947 #endif
1948 }
1949 
1950 
1951 /*
1952  * This routine is the main packet receive processing function.
1953  * It gets the packet type, error code, and buffer related
1954  * information from the receive completion entry.
1955  * How many completion entries to process is based on the number of packets
1956  * queued by the hardware, a hardware maintained tail pointer
1957  * and a configurable receive packet count.
1958  *
1959  * A chain of message blocks will be created as a result of processing
1960  * the completion entries. This chain of message blocks will be returned and
1961  * a hardware control status register will be updated with the number of
1962  * packets removed from the hardware queue.
1963  *
1964  */
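/*
 * Illustrative shape of the chain nxge_rx_pkts() returns (a sketch,
 * not code): packets are linked through b_next of their first mblk,
 * while the extra buffers of one jumbo/multi-buffer packet hang off
 * b_cont:
 *
 *	head_mp -> pkt0 -> pkt1 -> first_seg -> next_pkt ...
 *	  (b_next links)               |
 *	                            b_cont -> seg1 -> seg2
 */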
1965 static mblk_t *
1966 nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs,
1967     int bytes_to_pickup)
1968 {
1969 	npi_handle_t		handle;
1970 	uint8_t			channel;
1971 	uint32_t		comp_rd_index;
1972 	p_rcr_entry_t		rcr_desc_rd_head_p;
1973 	p_rcr_entry_t		rcr_desc_rd_head_pp;
1974 	p_mblk_t		nmp, mp_cont, head_mp, *tail_mp;
1975 	uint16_t		qlen, nrcr_read, npkt_read;
1976 	uint32_t		qlen_hw;
1977 	boolean_t		multi;
1978 	rcrcfig_b_t		rcr_cfg_b;
1979 	int			totallen = 0;
1980 #if defined(_BIG_ENDIAN)
1981 	npi_status_t		rs = NPI_SUCCESS;
1982 #endif
1983 
1984 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: "
1985 		"channel %d", rcr_p->rdc));
1986 
1987 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1988 		return (NULL);
1989 	}
1990 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1991 	channel = rcr_p->rdc;
1992 
1993 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1994 		"==> nxge_rx_pkts: START: rcr channel %d "
1995 		"head_p $%p head_pp $%p  index %d ",
1996 		channel, rcr_p->rcr_desc_rd_head_p,
1997 		rcr_p->rcr_desc_rd_head_pp,
1998 		rcr_p->comp_rd_index));
1999 
2000 
2001 #if !defined(_BIG_ENDIAN)
2002 	qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff;
2003 #else
2004 	rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
2005 	if (rs != NPI_SUCCESS) {
2006 		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: "
2007 		"channel %d, get qlen failed 0x%08x",
2008 		channel, rs));
2009 		return (NULL);
2010 	}
2011 #endif
2012 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d "
2013 		"qlen %d", channel, qlen));
2014 
2015 
2016 
2017 	if (!qlen) {
2018 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
2019 			"==> nxge_rx_pkts:rcr channel %d "
2020 			"qlen %d (no pkts)", channel, qlen));
2021 
2022 		return (NULL);
2023 	}
2024 
2025 	comp_rd_index = rcr_p->comp_rd_index;
2026 
2027 	rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p;
2028 	rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp;
2029 	nrcr_read = npkt_read = 0;
2030 
2031 	/*
2032 	 * Number of packets queued
2033 	 * (A jumbo or multi-buffer packet is counted as only one
2034 	 *  packet, though it may take up more than one completion entry).
2035 	 */
2036 	qlen_hw = (qlen < nxge_max_rx_pkts) ?
2037 		qlen : nxge_max_rx_pkts;
2038 	head_mp = NULL;
2039 	tail_mp = &head_mp;
2040 	nmp = mp_cont = NULL;
2041 	multi = B_FALSE;
2042 
2043 	while (qlen_hw) {
2044 
2045 #ifdef NXGE_DEBUG
2046 		nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p);
2047 #endif
2048 		/*
2049 		 * Process one completion ring entry.
2050 		 */
2051 		nxge_receive_packet(nxgep,
2052 			rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont);
2053 
2054 		/*
2055 		 * message chaining modes
2056 		 */
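		/*
		 * The four (multi, mp_cont) cases below, restated:
		 *
		 *	!multi && !mp_cont: whole frame in one buffer;
		 *		append via b_next.
		 *	 multi && !mp_cont: first segment; the next
		 *		segment will attach at b_cont.
		 *	 multi &&  mp_cont: middle segment; extend the
		 *		b_cont chain.
		 *	!multi &&  mp_cont: last segment; the next packet
		 *		attaches at the head segment's b_next.
		 */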
2057 		if (nmp) {
2058 			nmp->b_next = NULL;
2059 			if (!multi && !mp_cont) { /* frame fits a partition */
2060 				*tail_mp = nmp;
2061 				tail_mp = &nmp->b_next;
2062 				totallen += MBLKL(nmp);
2063 				nmp = NULL;
2064 			} else if (multi && !mp_cont) { /* first segment */
2065 				*tail_mp = nmp;
2066 				tail_mp = &nmp->b_cont;
2067 				totallen += MBLKL(nmp);
2068 			} else if (multi && mp_cont) {	/* mid of multi segs */
2069 				*tail_mp = mp_cont;
2070 				tail_mp = &mp_cont->b_cont;
2071 				totallen += MBLKL(mp_cont);
2072 			} else if (!multi && mp_cont) { /* last segment */
2073 				*tail_mp = mp_cont;
2074 				tail_mp = &nmp->b_next;
2075 				totallen += MBLKL(mp_cont);
2076 				nmp = NULL;
2077 			}
2078 		}
2079 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
2080 			"==> nxge_rx_pkts: loop: rcr channel %d "
2081 			"before updating: multi %d "
2082 			"nrcr_read %d "
2083 			"npk read %d "
2084 			"head_pp $%p  index %d ",
2085 			channel,
2086 			multi,
2087 			nrcr_read, npkt_read, rcr_desc_rd_head_pp,
2088 			comp_rd_index));
2089 
2090 		if (!multi) {
2091 			qlen_hw--;
2092 			npkt_read++;
2093 		}
2094 
2095 		/*
2096 		 * Update the next read entry.
2097 		 */
2098 		comp_rd_index = NEXT_ENTRY(comp_rd_index,
2099 					rcr_p->comp_wrap_mask);
2100 
2101 		rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
2102 				rcr_p->rcr_desc_first_p,
2103 				rcr_p->rcr_desc_last_p);
2104 
2105 		nrcr_read++;
2106 
2107 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
2108 			"<== nxge_rx_pkts: (SAM, process one packet) "
2109 			"nrcr_read %d",
2110 			nrcr_read));
2111 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
2112 			"==> nxge_rx_pkts: loop: rcr channel %d "
2113 			"multi %d "
2114 			"nrcr_read %d "
2115 			"npk read %d "
2116 			"head_pp $%p  index %d ",
2117 			channel,
2118 			multi,
2119 			nrcr_read, npkt_read, rcr_desc_rd_head_pp,
2120 			comp_rd_index));
2121 
2122 		if ((bytes_to_pickup != -1) &&
2123 		    (totallen >= bytes_to_pickup)) {
2124 			break;
2125 		}
2126 	}
2127 
2128 	rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp;
2129 	rcr_p->comp_rd_index = comp_rd_index;
2130 	rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p;
2131 
2132 	if ((nxgep->intr_timeout != rcr_p->intr_timeout) ||
2133 		(nxgep->intr_threshold != rcr_p->intr_threshold)) {
2134 		rcr_p->intr_timeout = nxgep->intr_timeout;
2135 		rcr_p->intr_threshold = nxgep->intr_threshold;
2136 		rcr_cfg_b.value = 0x0ULL;
2137 		if (rcr_p->intr_timeout)
2138 			rcr_cfg_b.bits.ldw.entout = 1;
2139 		rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout;
2140 		rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold;
2141 		RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG,
2142 				    channel, rcr_cfg_b.value);
2143 	}
2144 
2145 	cs.bits.ldw.pktread = npkt_read;
2146 	cs.bits.ldw.ptrread = nrcr_read;
2147 	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
2148 			    channel, cs.value);
2149 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
2150 		"==> nxge_rx_pkts: EXIT: rcr channel %d "
2151 		"head_pp $%p  index %d",
2152 		channel,
2153 		rcr_p->rcr_desc_rd_head_pp,
2154 		rcr_p->comp_rd_index));
2155 	/*
2156 	 * Update RCR buffer pointer read and number of packets
2157 	 * read.
2158 	 */
2159 
2160 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_pkts"));
2161 	return (head_mp);
2162 }
2163 
2164 void
2165 nxge_receive_packet(p_nxge_t nxgep,
2166     p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p,
2167     boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont)
2168 {
2169 	p_mblk_t		nmp = NULL;
2170 	uint64_t		multi;
2171 	uint64_t		dcf_err;
2172 	uint8_t			channel;
2173 
2174 	boolean_t		first_entry = B_TRUE;
2175 	boolean_t		is_tcp_udp = B_FALSE;
2176 	boolean_t		buffer_free = B_FALSE;
2177 	boolean_t		error_send_up = B_FALSE;
2178 	uint8_t			error_type;
2179 	uint16_t		l2_len;
2180 	uint16_t		skip_len;
2181 	uint8_t			pktbufsz_type;
2182 	uint64_t		rcr_entry;
2183 	uint64_t		*pkt_buf_addr_pp;
2184 	uint64_t		*pkt_buf_addr_p;
2185 	uint32_t		buf_offset;
2186 	uint32_t		bsize;
2187 	uint32_t		error_disp_cnt;
2188 	uint32_t		msg_index;
2189 	p_rx_rbr_ring_t		rx_rbr_p;
2190 	p_rx_msg_t 		*rx_msg_ring_p;
2191 	p_rx_msg_t		rx_msg_p;
2192 	uint16_t		sw_offset_bytes = 0, hdr_size = 0;
2193 	nxge_status_t		status = NXGE_OK;
2194 	boolean_t		is_valid = B_FALSE;
2195 	p_nxge_rx_ring_stats_t	rdc_stats;
2196 	uint32_t		bytes_read;
2197 	uint64_t		pkt_type;
2198 	uint64_t		frag;
2199 	boolean_t		pkt_too_long_err = B_FALSE;
2200 #ifdef	NXGE_DEBUG
2201 	int			dump_len;
2202 #endif
2203 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet"));
2204 	first_entry = (*mp == NULL) ? B_TRUE : B_FALSE;
2205 
2206 	rcr_entry = *((uint64_t *)rcr_desc_rd_head_p);
2207 
2208 	multi = (rcr_entry & RCR_MULTI_MASK);
2209 	dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK);
2210 	pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK);
2211 
2212 	error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT);
2213 	frag = (rcr_entry & RCR_FRAG_MASK);
2214 
2215 	l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT);
2216 
2217 	pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >>
2218 				RCR_PKTBUFSZ_SHIFT);
2219 #if defined(__i386)
2220 	pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry &
2221 			RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT);
2222 #else
2223 	pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) <<
2224 			RCR_PKT_BUF_ADDR_SHIFT);
2225 #endif
2226 
2227 	channel = rcr_p->rdc;
2228 
2229 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2230 		"==> nxge_receive_packet: entryp $%p entry 0x%0llx "
2231 		"pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
2232 		"error_type 0x%x pkt_type 0x%x  "
2233 		"pktbufsz_type %d ",
2234 		rcr_desc_rd_head_p,
2235 		rcr_entry, pkt_buf_addr_pp, l2_len,
2236 		multi,
2237 		error_type,
2238 		pkt_type,
2239 		pktbufsz_type));
2240 
2241 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2242 		"==> nxge_receive_packet: entryp $%p entry 0x%0llx "
2243 		"pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
2244 		"error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p,
2245 		rcr_entry, pkt_buf_addr_pp, l2_len,
2246 		multi,
2247 		error_type,
2248 		pkt_type));
2249 
2250 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2251 		"==> (rbr) nxge_receive_packet: entry 0x%0llx "
2252 		"full pkt_buf_addr_pp $%p l2_len %d",
2253 		rcr_entry, pkt_buf_addr_pp, l2_len));
2254 
2255 	/* get the stats ptr */
2256 	rdc_stats = rcr_p->rdc_stats;
2257 
2258 	if (!l2_len) {
2259 
2260 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
2261 			"<== nxge_receive_packet: failed: l2 length is 0."));
2262 		return;
2263 	}
2264 
2265 	/*
2266 	 * Software workaround for a BMAC hardware limitation that allows
2267 	 * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406
2268 	 * instead of 0x2400 for jumbo.
2269 	 */
2270 	if (l2_len > nxgep->mac.maxframesize) {
2271 		pkt_too_long_err = B_TRUE;
2272 	}
2273 
2274 	/* Hardware sends us 4 bytes of CRC as no stripping is done.  */
2275 	l2_len -= ETHERFCSL;
2276 
2277 	/* shift 6 bits to get the full io address */
2278 #if defined(__i386)
2279 	pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp <<
2280 				RCR_PKT_BUF_ADDR_SHIFT_FULL);
2281 #else
2282 	pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp <<
2283 				RCR_PKT_BUF_ADDR_SHIFT_FULL);
2284 #endif
2285 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2286 		"==> (rbr) nxge_receive_packet: entry 0x%0llx "
2287 		"full pkt_buf_addr_pp $%p l2_len %d",
2288 		rcr_entry, pkt_buf_addr_pp, l2_len));
2289 
2290 	rx_rbr_p = rcr_p->rx_rbr_p;
2291 	rx_msg_ring_p = rx_rbr_p->rx_msg_ring;
2292 
2293 	if (first_entry) {
2294 		hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL :
2295 			RXDMA_HDR_SIZE_DEFAULT);
2296 
2297 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
2298 			"==> nxge_receive_packet: first entry 0x%016llx "
2299 			"pkt_buf_addr_pp $%p l2_len %d hdr %d",
2300 			rcr_entry, pkt_buf_addr_pp, l2_len,
2301 			hdr_size));
2302 	}
2303 
2304 	MUTEX_ENTER(&rcr_p->lock);
2305 	MUTEX_ENTER(&rx_rbr_p->lock);
2306 
2307 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
2308 		"==> (rbr 1) nxge_receive_packet: entry 0x%0llx "
2309 		"full pkt_buf_addr_pp $%p l2_len %d",
2310 		rcr_entry, pkt_buf_addr_pp, l2_len));
2311 
2312 	/*
2313 	 * Packet buffer address in the completion entry points
2314 	 * to the starting buffer address (offset 0).
2315 	 * Use the starting buffer address to locate the corresponding
2316 	 * kernel address.
2317 	 */
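	/*
	 * Conceptual sketch of the lookup (nxge_rxbuf_pp_to_vp() is
	 * presumed to search the ring_info buffer table built in
	 * nxge_map_rxdma_channel_buf_ring()): find the chunk whose
	 * [dvma_addr, dvma_addr + buf_size) range contains the I/O
	 * address, then
	 *
	 *	buf_offset = io_addr - buffer[i].dvma_addr;
	 *	kaddr	   = buffer[i].kaddr + buf_offset;
	 *	msg_index  = buffer[i].start_index +
	 *	    (buf_offset / block_size);
	 */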
2318 	status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p,
2319 			pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p,
2320 			&buf_offset,
2321 			&msg_index);
2322 
2323 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
2324 		"==> (rbr 2) nxge_receive_packet: entry 0x%0llx "
2325 		"full pkt_buf_addr_pp $%p l2_len %d",
2326 		rcr_entry, pkt_buf_addr_pp, l2_len));
2327 
2328 	if (status != NXGE_OK) {
2329 		MUTEX_EXIT(&rx_rbr_p->lock);
2330 		MUTEX_EXIT(&rcr_p->lock);
2331 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
2332 			"<== nxge_receive_packet: found vaddr failed %d",
2333 				status));
2334 		return;
2335 	}
2336 
2337 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2338 		"==> (rbr 3) nxge_receive_packet: entry 0x%0llx "
2339 		"full pkt_buf_addr_pp $%p l2_len %d",
2340 		rcr_entry, pkt_buf_addr_pp, l2_len));
2341 
2342 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2343 		"==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
2344 		"full pkt_buf_addr_pp $%p l2_len %d",
2345 		msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
2346 
2347 	rx_msg_p = rx_msg_ring_p[msg_index];
2348 
2349 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2350 		"==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
2351 		"full pkt_buf_addr_pp $%p l2_len %d",
2352 		msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
2353 
2354 	switch (pktbufsz_type) {
2355 	case RCR_PKTBUFSZ_0:
2356 		bsize = rx_rbr_p->pkt_buf_size0_bytes;
2357 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2358 			"==> nxge_receive_packet: 0 buf %d", bsize));
2359 		break;
2360 	case RCR_PKTBUFSZ_1:
2361 		bsize = rx_rbr_p->pkt_buf_size1_bytes;
2362 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2363 			"==> nxge_receive_packet: 1 buf %d", bsize));
2364 		break;
2365 	case RCR_PKTBUFSZ_2:
2366 		bsize = rx_rbr_p->pkt_buf_size2_bytes;
2367 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
2368 			"==> nxge_receive_packet: 2 buf %d", bsize));
2369 		break;
2370 	case RCR_SINGLE_BLOCK:
2371 		bsize = rx_msg_p->block_size;
2372 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2373 			"==> nxge_receive_packet: single %d", bsize));
2374 
2375 		break;
2376 	default:
2377 		MUTEX_EXIT(&rx_rbr_p->lock);
2378 		MUTEX_EXIT(&rcr_p->lock);
2379 		return;
2380 	}
2381 
2382 	DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma,
2383 		(buf_offset + sw_offset_bytes),
2384 		(hdr_size + l2_len),
2385 		DDI_DMA_SYNC_FORCPU);
2386 
2387 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2388 		"==> nxge_receive_packet: after first dump:usage count"));
2389 
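	/*
	 * Usage-count accounting below, restated: one DMA block carved
	 * into block_size/bsize packet buffers is shared by that many
	 * packets. max_usage_cnt records the quotient on first use, and
	 * buffer_free is raised once cur_usage_cnt reaches it so the
	 * block can be freed back to the RBR.
	 */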
2390 	if (rx_msg_p->cur_usage_cnt == 0) {
2391 		if (rx_rbr_p->rbr_use_bcopy) {
2392 			atomic_inc_32(&rx_rbr_p->rbr_consumed);
2393 			if (rx_rbr_p->rbr_consumed <
2394 					rx_rbr_p->rbr_threshold_hi) {
2395 				if (rx_rbr_p->rbr_threshold_lo == 0 ||
2396 					((rx_rbr_p->rbr_consumed >=
2397 						rx_rbr_p->rbr_threshold_lo) &&
2398 						(rx_rbr_p->rbr_bufsize_type >=
2399 							pktbufsz_type))) {
2400 					rx_msg_p->rx_use_bcopy = B_TRUE;
2401 				}
2402 			} else {
2403 				rx_msg_p->rx_use_bcopy = B_TRUE;
2404 			}
2405 		}
2406 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2407 			"==> nxge_receive_packet: buf %d (new block) ",
2408 			bsize));
2409 
2410 		rx_msg_p->pkt_buf_size_code = pktbufsz_type;
2411 		rx_msg_p->pkt_buf_size = bsize;
2412 		rx_msg_p->cur_usage_cnt = 1;
2413 		if (pktbufsz_type == RCR_SINGLE_BLOCK) {
2414 			NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2415 				"==> nxge_receive_packet: buf %d "
2416 				"(single block) ",
2417 				bsize));
2418 			/*
2419 			 * Buffer can be reused once the free function
2420 			 * is called.
2421 			 */
2422 			rx_msg_p->max_usage_cnt = 1;
2423 			buffer_free = B_TRUE;
2424 		} else {
2425 			rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize;
2426 			if (rx_msg_p->max_usage_cnt == 1) {
2427 				buffer_free = B_TRUE;
2428 			}
2429 		}
2430 	} else {
2431 		rx_msg_p->cur_usage_cnt++;
2432 		if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) {
2433 			buffer_free = B_TRUE;
2434 		}
2435 	}
2436 
2437 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
2438 	    "msgbuf index = %d l2len %d bytes usage %d max_usage %d ",
2439 		msg_index, l2_len,
2440 		rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt));
2441 
2442 	if ((error_type) || (dcf_err) || (pkt_too_long_err)) {
2443 		rdc_stats->ierrors++;
2444 		if (dcf_err) {
2445 			rdc_stats->dcf_err++;
2446 #ifdef	NXGE_DEBUG
2447 			if (rdc_stats->dcf_err == 1) {
2448 				NXGE_DEBUG_MSG((nxgep, RX_CTL,
2449 				"nxge_receive_packet: channel %d dcf_err rcr"
2450 				" 0x%llx", channel, rcr_entry));
2451 			}
2452 #endif
2453 			NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
2454 					NXGE_FM_EREPORT_RDMC_DCF_ERR);
2455 		} else if (pkt_too_long_err) {
2456 			rdc_stats->pkt_too_long_err++;
2457 			NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:"
2458 			    " channel %d packet length [%d] > "
2459 			    "maxframesize [%d]", channel, l2_len + ETHERFCSL,
2460 			    nxgep->mac.maxframesize));
2461 		} else {
2462 				/* Update error stats */
2463 			error_disp_cnt = NXGE_ERROR_SHOW_MAX;
2464 			rdc_stats->errlog.compl_err_type = error_type;
2465 
2466 			switch (error_type) {
2467 			/*
2468 			 * Do not send FMA ereport for RCR_L2_ERROR and
2469 			 * RCR_L4_CSUM_ERROR because most likely they indicate
2470 			 * back pressure rather than HW failures.
2471 			 */
2472 			case RCR_L2_ERROR:
2473 				rdc_stats->l2_err++;
2474 				if (rdc_stats->l2_err <
2475 				    error_disp_cnt) {
2476 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2477 					    " nxge_receive_packet:"
2478 					    " channel %d RCR L2_ERROR",
2479 					    channel));
2480 				}
2481 				break;
2482 			case RCR_L4_CSUM_ERROR:
2483 				error_send_up = B_TRUE;
2484 				rdc_stats->l4_cksum_err++;
2485 				if (rdc_stats->l4_cksum_err <
2486 				    error_disp_cnt) {
2487 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2488 					    " nxge_receive_packet:"
2489 					    " channel %d"
2490 					    " RCR L4_CSUM_ERROR", channel));
2491 				}
2492 				break;
2493 			/*
2494 			 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and
2495 			 * RCR_ZCP_SOFT_ERROR because they reflect the same
2496 			 * FFLP and ZCP errors that have been reported by
2497 			 * nxge_fflp.c and nxge_zcp.c.
2498 			 */
2499 			case RCR_FFLP_SOFT_ERROR:
2500 				error_send_up = B_TRUE;
2501 				rdc_stats->fflp_soft_err++;
2502 				if (rdc_stats->fflp_soft_err <
2503 				    error_disp_cnt) {
2504 					NXGE_ERROR_MSG((nxgep,
2505 					    NXGE_ERR_CTL,
2506 					    " nxge_receive_packet:"
2507 					    " channel %d"
2508 					    " RCR FFLP_SOFT_ERROR", channel));
2509 				}
2510 				break;
2511 			case RCR_ZCP_SOFT_ERROR:
2512 				error_send_up = B_TRUE;
2513 				rdc_stats->zcp_soft_err++;
2514 				if (rdc_stats->zcp_soft_err <
2515 				    error_disp_cnt)
2516 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2517 					    " nxge_receive_packet: Channel %d"
2518 					    " RCR ZCP_SOFT_ERROR", channel));
2519 				break;
2520 			default:
2521 				rdc_stats->rcr_unknown_err++;
2522 				if (rdc_stats->rcr_unknown_err
2523 				    < error_disp_cnt) {
2524 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2525 					    " nxge_receive_packet: Channel %d"
2526 					    " RCR entry 0x%llx error 0x%x",
2527 					    channel, rcr_entry, error_type));
2528 				}
2529 				break;
2530 			}
2531 		}
2532 
2533 		/*
2534 		 * Update and repost buffer block if max usage
2535 		 * count is reached.
2536 		 */
2537 		if (error_send_up == B_FALSE) {
2538 			atomic_inc_32(&rx_msg_p->ref_cnt);
2539 			if (buffer_free == B_TRUE) {
2540 				rx_msg_p->free = B_TRUE;
2541 			}
2542 
2543 			MUTEX_EXIT(&rx_rbr_p->lock);
2544 			MUTEX_EXIT(&rcr_p->lock);
2545 			nxge_freeb(rx_msg_p);
2546 			return;
2547 		}
2548 	}
2549 
2550 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2551 		"==> nxge_receive_packet: DMA sync second "));
2552 
2553 	bytes_read = rcr_p->rcvd_pkt_bytes;
2554 	skip_len = sw_offset_bytes + hdr_size;
2555 	if (!rx_msg_p->rx_use_bcopy) {
2556 		/*
2557 		 * For loaned-up buffers, the driver reference count
2558 		 * is incremented first, and then the free state is set.
2559 		 */
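		/*
		 * Zero-copy path: nxge_dupb() presumably wraps the DMA
		 * buffer itself in a new mblk and loans it upstream, so
		 * the block is reposted only after nxge_freeb() runs.
		 * The else branch instead bcopies at most
		 * min(l2_len, bsize - skip_len) bytes (first entry) into
		 * a fresh mblk so the block can be reused immediately.
		 */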
2560 		if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) {
2561 			if (first_entry) {
2562 				nmp->b_rptr = &nmp->b_rptr[skip_len];
2563 				if (l2_len < bsize - skip_len) {
2564 					nmp->b_wptr = &nmp->b_rptr[l2_len];
2565 				} else {
2566 					nmp->b_wptr = &nmp->b_rptr[bsize
2567 					    - skip_len];
2568 				}
2569 			} else {
2570 				if (l2_len - bytes_read < bsize) {
2571 					nmp->b_wptr =
2572 					    &nmp->b_rptr[l2_len - bytes_read];
2573 				} else {
2574 					nmp->b_wptr = &nmp->b_rptr[bsize];
2575 				}
2576 			}
2577 		}
2578 	} else {
2579 		if (first_entry) {
2580 			nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len,
2581 			    l2_len < bsize - skip_len ?
2582 			    l2_len : bsize - skip_len);
2583 		} else {
2584 			nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset,
2585 			    l2_len - bytes_read < bsize ?
2586 			    l2_len - bytes_read : bsize);
2587 		}
2588 	}
2589 	if (nmp != NULL) {
2590 		if (first_entry)
2591 			bytes_read  = nmp->b_wptr - nmp->b_rptr;
2592 		else
2593 			bytes_read += nmp->b_wptr - nmp->b_rptr;
2594 
2595 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
2596 		    "==> nxge_receive_packet after dupb: "
2597 		    "rbr consumed %d "
2598 		    "pktbufsz_type %d "
2599 		    "nmp $%p rptr $%p wptr $%p "
2600 		    "buf_offset %d bsize %d l2_len %d skip_len %d",
2601 		    rx_rbr_p->rbr_consumed,
2602 		    pktbufsz_type,
2603 		    nmp, nmp->b_rptr, nmp->b_wptr,
2604 		    buf_offset, bsize, l2_len, skip_len));
2605 	} else {
2606 		cmn_err(CE_WARN, "!nxge_receive_packet: "
2607 			"update stats (error)");
2608 		atomic_inc_32(&rx_msg_p->ref_cnt);
2609 		if (buffer_free == B_TRUE) {
2610 			rx_msg_p->free = B_TRUE;
2611 		}
2612 		MUTEX_EXIT(&rx_rbr_p->lock);
2613 		MUTEX_EXIT(&rcr_p->lock);
2614 		nxge_freeb(rx_msg_p);
2615 		return;
2616 	}
2617 
2618 	if (buffer_free == B_TRUE) {
2619 		rx_msg_p->free = B_TRUE;
2620 	}
2621 	/*
2622 	 * ERROR, FRAG and PKT_TYPE are only reported
2623 	 * in the first entry.
2624 	 * If a packet is not fragmented and no error bit is set, then
2625 	 * L4 checksum is OK.
2626 	 */
2627 	is_valid = (nmp != NULL);
2628 	if (first_entry) {
2629 		rdc_stats->ipackets++; /* count only 1st seg for jumbo */
2630 		rdc_stats->ibytes += skip_len + l2_len < bsize ?
2631 		    l2_len : bsize;
2632 	} else {
2633 		rdc_stats->ibytes += l2_len - bytes_read < bsize ?
2634 		    l2_len - bytes_read : bsize;
2635 	}
2636 
2637 	rcr_p->rcvd_pkt_bytes = bytes_read;
2638 
2639 	MUTEX_EXIT(&rx_rbr_p->lock);
2640 	MUTEX_EXIT(&rcr_p->lock);
2641 
2642 	if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) {
2643 		atomic_inc_32(&rx_msg_p->ref_cnt);
2644 		nxge_freeb(rx_msg_p);
2645 	}
2646 
2647 	if (is_valid) {
2648 		nmp->b_cont = NULL;
2649 		if (first_entry) {
2650 			*mp = nmp;
2651 			*mp_cont = NULL;
2652 		} else {
2653 			*mp_cont = nmp;
2654 		}
2655 	}
2656 
2657 	/*
2658 	 * Update stats and hardware checksumming.
2659 	 */
2660 	if (is_valid && !multi) {
2661 		/*
2662 		 * If the checksum tunable nxge_cksum_offload
2663 		 * is set to 1, both TCP and UDP packets can be
2664 		 * sent up with good checksums. If it is set to 0,
2665 		 * checksum reporting applies to TCP packets only
2666 		 * (workaround for a hardware bug). If
2667 		 * nxge_cksum_offload is greater than 1, neither
2668 		 * TCP nor UDP packets have their hardware
2669 		 * checksum results reported.
2670 		 */
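		/*
		 * Summary of the tunable (restating the comment above):
		 *
		 *	nxge_cksum_offload == 0: report TCP only
		 *	nxge_cksum_offload == 1: report TCP and UDP
		 *	nxge_cksum_offload >  1: report neither
		 */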
2671 		if (nxge_cksum_offload == 1) {
2672 			is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP ||
2673 				pkt_type == RCR_PKT_IS_UDP) ?
2674 					B_TRUE: B_FALSE);
2675 		} else if (!nxge_cksum_offload) {
2676 			/* TCP checksum only. */
2677 			is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ?
2678 					B_TRUE: B_FALSE);
2679 		}
2680 
2681 		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: "
2682 			"is_valid 0x%x multi 0x%llx pkt %d frag %d error %d",
2683 			is_valid, multi, is_tcp_udp, frag, error_type));
2684 
2685 		if (is_tcp_udp && !frag && !error_type) {
2686 			(void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0,
2687 				HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0);
2688 			NXGE_DEBUG_MSG((nxgep, RX_CTL,
2689 				"==> nxge_receive_packet: Full tcp/udp cksum "
2690 				"is_valid 0x%x multi 0x%llx pkt %d frag %d "
2691 				"error %d",
2692 				is_valid, multi, is_tcp_udp, frag, error_type));
2693 		}
2694 	}
2695 
2696 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2697 		"==> nxge_receive_packet: *mp 0x%016llx", *mp));
2698 
2699 	*multi_p = (multi == RCR_MULTI_MASK);
2700 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: "
2701 		"multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx",
2702 		*multi_p, nmp, *mp, *mp_cont));
2703 }
2704 
2705 /*ARGSUSED*/
2706 static nxge_status_t
2707 nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs)
2708 {
2709 	p_nxge_rx_ring_stats_t	rdc_stats;
2710 	npi_handle_t		handle;
2711 	npi_status_t		rs;
2712 	boolean_t		rxchan_fatal = B_FALSE;
2713 	boolean_t		rxport_fatal = B_FALSE;
2714 	uint8_t			portn;
2715 	nxge_status_t		status = NXGE_OK;
2716 	uint32_t		error_disp_cnt = NXGE_ERROR_SHOW_MAX;
2717 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts"));
2718 
2719 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
2720 	portn = nxgep->mac.portnum;
2721 	rdc_stats = &nxgep->statsp->rdc_stats[channel];
2722 
2723 	if (cs.bits.hdw.rbr_tmout) {
2724 		rdc_stats->rx_rbr_tmout++;
2725 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2726 					NXGE_FM_EREPORT_RDMC_RBR_TMOUT);
2727 		rxchan_fatal = B_TRUE;
2728 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2729 			"==> nxge_rx_err_evnts: rx_rbr_timeout"));
2730 	}
2731 	if (cs.bits.hdw.rsp_cnt_err) {
2732 		rdc_stats->rsp_cnt_err++;
2733 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2734 					NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR);
2735 		rxchan_fatal = B_TRUE;
2736 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2737 			"==> nxge_rx_err_evnts(channel %d): "
2738 			"rsp_cnt_err", channel));
2739 	}
2740 	if (cs.bits.hdw.byte_en_bus) {
2741 		rdc_stats->byte_en_bus++;
2742 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2743 					NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS);
2744 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2745 			"==> nxge_rx_err_evnts(channel %d): "
2746 			"fatal error: byte_en_bus", channel));
2747 		rxchan_fatal = B_TRUE;
2748 	}
2749 	if (cs.bits.hdw.rsp_dat_err) {
2750 		rdc_stats->rsp_dat_err++;
2751 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2752 					NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR);
2753 		rxchan_fatal = B_TRUE;
2754 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2755 			"==> nxge_rx_err_evnts(channel %d): "
2756 			"fatal error: rsp_dat_err", channel));
2757 	}
2758 	if (cs.bits.hdw.rcr_ack_err) {
2759 		rdc_stats->rcr_ack_err++;
2760 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2761 					NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR);
2762 		rxchan_fatal = B_TRUE;
2763 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2764 			"==> nxge_rx_err_evnts(channel %d): "
2765 			"fatal error: rcr_ack_err", channel));
2766 	}
2767 	if (cs.bits.hdw.dc_fifo_err) {
2768 		rdc_stats->dc_fifo_err++;
2769 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2770 					NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR);
2771 		/* Not a channel-fatal error, but it is fatal to the port. */
2772 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2773 			"==> nxge_rx_err_evnts(channel %d): "
2774 			"dc_fifo_err", channel));
2775 		rxport_fatal = B_TRUE;
2776 	}
2777 	if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) {
2778 		if ((rs = npi_rxdma_ring_perr_stat_get(handle,
2779 				&rdc_stats->errlog.pre_par,
2780 				&rdc_stats->errlog.sha_par))
2781 				!= NPI_SUCCESS) {
2782 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2783 				"==> nxge_rx_err_evnts(channel %d): "
2784 				"rcr_sha_par: get perr", channel));
2785 			return (NXGE_ERROR | rs);
2786 		}
2787 		if (cs.bits.hdw.rcr_sha_par) {
2788 			rdc_stats->rcr_sha_par++;
2789 			NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2790 					NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR);
2791 			rxchan_fatal = B_TRUE;
2792 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2793 				"==> nxge_rx_err_evnts(channel %d): "
2794 				"fatal error: rcr_sha_par", channel));
2795 		}
2796 		if (cs.bits.hdw.rbr_pre_par) {
2797 			rdc_stats->rbr_pre_par++;
2798 			NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2799 					NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR);
2800 			rxchan_fatal = B_TRUE;
2801 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2802 				"==> nxge_rx_err_evnts(channel %d): "
2803 				"fatal error: rbr_pre_par", channel));
2804 		}
2805 	}
2806 	/*
2807 	 * The following 4 status bits are informational; the system
2808 	 * is running fine, so there is no need to send FMA ereports
2809 	 * or log messages.
2810 	 */
2811 	if (cs.bits.hdw.port_drop_pkt) {
2812 		rdc_stats->port_drop_pkt++;
2813 	}
2814 	if (cs.bits.hdw.wred_drop) {
2815 		rdc_stats->wred_drop++;
2816 	}
2817 	if (cs.bits.hdw.rbr_pre_empty) {
2818 		rdc_stats->rbr_pre_empty++;
2819 	}
2820 	if (cs.bits.hdw.rcr_shadow_full) {
2821 		rdc_stats->rcr_shadow_full++;
2822 	}
2823 	if (cs.bits.hdw.config_err) {
2824 		rdc_stats->config_err++;
2825 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2826 					NXGE_FM_EREPORT_RDMC_CONFIG_ERR);
2827 		rxchan_fatal = B_TRUE;
2828 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2829 			"==> nxge_rx_err_evnts(channel %d): "
2830 			"config error", channel));
2831 	}
2832 	if (cs.bits.hdw.rcrincon) {
2833 		rdc_stats->rcrincon++;
2834 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2835 					NXGE_FM_EREPORT_RDMC_RCRINCON);
2836 		rxchan_fatal = B_TRUE;
2837 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2838 			"==> nxge_rx_err_evnts(channel %d): "
2839 			"fatal error: rcrincon error", channel));
2840 	}
2841 	if (cs.bits.hdw.rcrfull) {
2842 		rdc_stats->rcrfull++;
2843 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2844 					NXGE_FM_EREPORT_RDMC_RCRFULL);
2845 		rxchan_fatal = B_TRUE;
2846 		if (rdc_stats->rcrfull < error_disp_cnt)
2847 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2848 			    "==> nxge_rx_err_evnts(channel %d): "
2849 			    "fatal error: rcrfull error", channel));
2850 	}
2851 	if (cs.bits.hdw.rbr_empty) {
2852 		/*
2853 		 * This bit is informational; there is no need to
2854 		 * send an FMA ereport or log a message.
2855 		 */
2856 		rdc_stats->rbr_empty++;
2857 	}
2858 	if (cs.bits.hdw.rbrfull) {
2859 		rdc_stats->rbrfull++;
2860 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2861 					NXGE_FM_EREPORT_RDMC_RBRFULL);
2862 		rxchan_fatal = B_TRUE;
2863 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2864 			"==> nxge_rx_err_evnts(channel %d): "
2865 			"fatal error: rbr_full error", channel));
2866 	}
2867 	if (cs.bits.hdw.rbrlogpage) {
2868 		rdc_stats->rbrlogpage++;
2869 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2870 					NXGE_FM_EREPORT_RDMC_RBRLOGPAGE);
2871 		rxchan_fatal = B_TRUE;
2872 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2873 			"==> nxge_rx_err_evnts(channel %d): "
2874 			"fatal error: rbr logical page error", channel));
2875 	}
2876 	if (cs.bits.hdw.cfiglogpage) {
2877 		rdc_stats->cfiglogpage++;
2878 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2879 					NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE);
2880 		rxchan_fatal = B_TRUE;
2881 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2882 			"==> nxge_rx_err_evnts(channel %d): "
2883 			"fatal error: cfig logical page error", channel));
2884 	}
2885 
2886 	if (rxport_fatal)  {
2887 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2888 		    " nxge_rx_err_evnts: fatal error on Port #%d\n",
2889 		    portn));
2890 		if (isLDOMguest(nxgep)) {
2891 			status = NXGE_ERROR;
2892 		} else {
2893 			status = nxge_ipp_fatal_err_recover(nxgep);
2894 			if (status == NXGE_OK) {
2895 				FM_SERVICE_RESTORED(nxgep);
2896 			}
2897 		}
2898 	}
2899 
2900 	if (rxchan_fatal) {
2901 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2902 		    " nxge_rx_err_evnts: fatal error on Channel #%d\n",
2903 		    channel));
2904 		if (isLDOMguest(nxgep)) {
2905 			status = NXGE_ERROR;
2906 		} else {
2907 			status = nxge_rxdma_fatal_err_recover(nxgep, channel);
2908 			if (status == NXGE_OK) {
2909 				FM_SERVICE_RESTORED(nxgep);
2910 			}
2911 		}
2912 	}
2913 
2914 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts"));
2915 
2916 	return (status);
2917 }
2918 
2919 /*
2920  * nxge_rdc_hvio_setup
2921  *
2922  *	This code appears to set up some Hypervisor variables.
2923  *
2924  * Arguments:
2925  * 	nxgep
2926  * 	channel
2927  *
2928  * Notes:
2929  *	What does NIU_LP_WORKAROUND mean?
2930  *
2931  * NPI/NXGE function calls:
2932  *	na
2933  *
2934  * Context:
2935  *	Any domain
2936  */
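/*
 * The hv_* fields recorded below are presumably the physical base
 * addresses and sizes of the data and control DMA areas that are later
 * handed to the Hypervisor when the logical pages for this channel are
 * programmed; hv_set remains B_FALSE until that step completes.
 */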
2937 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2938 static void
2939 nxge_rdc_hvio_setup(
2940 	nxge_t *nxgep, int channel)
2941 {
2942 	nxge_dma_common_t	*dma_common;
2943 	nxge_dma_common_t	*dma_control;
2944 	rx_rbr_ring_t		*ring;
2945 
2946 	ring = nxgep->rx_rbr_rings->rbr_rings[channel];
2947 	dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
2948 
2949 	ring->hv_set = B_FALSE;
2950 
2951 	ring->hv_rx_buf_base_ioaddr_pp = (uint64_t)
2952 	    dma_common->orig_ioaddr_pp;
2953 	ring->hv_rx_buf_ioaddr_size = (uint64_t)
2954 	    dma_common->orig_alength;
2955 
2956 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: "
2957 	    "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)",
2958 	    channel, ring->hv_rx_buf_base_ioaddr_pp,
2959 	    dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size,
2960 	    dma_common->orig_alength, dma_common->orig_alength));
2961 
2962 	dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
2963 
2964 	ring->hv_rx_cntl_base_ioaddr_pp =
2965 	    (uint64_t)dma_control->orig_ioaddr_pp;
2966 	ring->hv_rx_cntl_ioaddr_size =
2967 	    (uint64_t)dma_control->orig_alength;
2968 
2969 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: "
2970 	    "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)",
2971 	    channel, ring->hv_rx_cntl_base_ioaddr_pp,
2972 	    dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size,
2973 	    dma_control->orig_alength, dma_control->orig_alength));
2974 }
2975 #endif
2976 
2977 /*
2978  * nxge_map_rxdma
2979  *
2980  *	Map an RDC into our kernel space.
2981  *
2982  * Arguments:
2983  * 	nxgep
2984  * 	channel	The channel to map.
2985  *
2986  * Notes:
2987  *	1. Allocate & initialise a memory pool, if necessary.
2988  *	2. Allocate however many receive buffers are required.
2989  *	3. Setup buffers, descriptors, and mailbox.
2990  *
2991  * NPI/NXGE function calls:
2992  *	nxge_alloc_rx_mem_pool()
2993  *	nxge_alloc_rbb()
2994  *	nxge_map_rxdma_channel()
2995  *
2996  * Registers accessed:
2997  *
2998  * Context:
2999  *	Any domain
3000  */
3001 static nxge_status_t
3002 nxge_map_rxdma(p_nxge_t nxgep, int channel)
3003 {
3004 	nxge_dma_common_t	**data;
3005 	nxge_dma_common_t	**control;
3006 	rx_rbr_ring_t		**rbr_ring;
3007 	rx_rcr_ring_t		**rcr_ring;
3008 	rx_mbox_t		**mailbox;
3009 	uint32_t		chunks;
3010 
3011 	nxge_status_t		status;
3012 
3013 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma"));
3014 
3015 	if (!nxgep->rx_buf_pool_p) {
3016 		if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) {
3017 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3018 			    "<== nxge_map_rxdma: buf not allocated"));
3019 			return (NXGE_ERROR);
3020 		}
3021 	}
3022 
3023 	if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK)
3024 		return (NXGE_ERROR);
3025 
3026 	/*
3027 	 * Timeout should be set based on the system clock divider.
3028 	 * The following timeout value of 1 assumes that the
3029 	 * granularity (1000) is 3 microseconds running at 300MHz.
3030 	 */
3031 
3032 	nxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT;
3033 	nxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT;
3034 
3035 	/*
3036 	 * Map descriptors from the buffer pools for each dma channel.
3037 	 */
3038 
3039 	/*
3040 	 * Set up and prepare buffer blocks, descriptors
3041 	 * and mailbox.
3042 	 */
3043 	data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
3044 	rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel];
3045 	chunks = nxgep->rx_buf_pool_p->num_chunks[channel];
3046 
3047 	control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
3048 	rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel];
3049 
3050 	mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel];
3051 
3052 	status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring,
3053 	    chunks, control, rcr_ring, mailbox);
3054 	if (status != NXGE_OK) {
3055 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3056 			"==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) "
3057 			"returned 0x%x",
3058 			channel, status));
3059 		return (status);
3060 	}
3061 	nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel;
3062 	nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel;
3063 	nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats =
3064 	    &nxgep->statsp->rdc_stats[channel];
3065 
3066 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
3067 	if (!isLDOMguest(nxgep))
3068 		nxge_rdc_hvio_setup(nxgep, channel);
3069 #endif
3070 
3071 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3072 	    "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel));
3073 
3074 	return (status);
3075 }
3076 
3077 static void
3078 nxge_unmap_rxdma(p_nxge_t nxgep, int channel)
3079 {
3080 	rx_rbr_ring_t	*rbr_ring;
3081 	rx_rcr_ring_t	*rcr_ring;
3082 	rx_mbox_t	*mailbox;
3083 
3084 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel));
3085 
3086 	if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings ||
3087 	    !nxgep->rx_mbox_areas_p)
3088 		return;
3089 
3090 	rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel];
3091 	rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel];
3092 	mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel];
3093 
3094 	if (!rbr_ring || !rcr_ring || !mailbox)
3095 		return;
3096 
3097 	(void) nxge_unmap_rxdma_channel(
3098 		nxgep, channel, rbr_ring, rcr_ring, mailbox);
3099 
3100 	nxge_free_rxb(nxgep, channel);
3101 
3102 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma"));
3103 }
3104 
3105 nxge_status_t
3106 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
3107     p_nxge_dma_common_t *dma_buf_p,  p_rx_rbr_ring_t *rbr_p,
3108     uint32_t num_chunks,
3109     p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p,
3110     p_rx_mbox_t *rx_mbox_p)
3111 {
3112 	int	status = NXGE_OK;
3113 
3114 	/*
3115 	 * Set up and prepare buffer blocks, descriptors
3116 	 * and mailbox.
3117 	 */
3118 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3119 		"==> nxge_map_rxdma_channel (channel %d)", channel));
3120 	/*
3121 	 * Receive buffer blocks
3122 	 */
3123 	status = nxge_map_rxdma_channel_buf_ring(nxgep, channel,
3124 			dma_buf_p, rbr_p, num_chunks);
3125 	if (status != NXGE_OK) {
3126 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3127 			"==> nxge_map_rxdma_channel (channel %d): "
3128 			"map buffer failed 0x%x", channel, status));
3129 		goto nxge_map_rxdma_channel_exit;
3130 	}
3131 
3132 	/*
3133 	 * Receive block ring, completion ring and mailbox.
3134 	 */
3135 	status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel,
3136 			dma_cntl_p, rbr_p, rcr_p, rx_mbox_p);
3137 	if (status != NXGE_OK) {
3138 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3139 			"==> nxge_map_rxdma_channel (channel %d): "
3140 			"map config failed 0x%x", channel, status));
3141 		goto nxge_map_rxdma_channel_fail2;
3142 	}
3143 
3144 	goto nxge_map_rxdma_channel_exit;
3145 
3146 nxge_map_rxdma_channel_fail3:
3147 	/* Free rbr, rcr */
3148 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3149 		"==> nxge_map_rxdma_channel: free rbr/rcr "
3150 		"(status 0x%x channel %d)",
3151 		status, channel));
3152 	nxge_unmap_rxdma_channel_cfg_ring(nxgep,
3153 		*rcr_p, *rx_mbox_p);
3154 
3155 nxge_map_rxdma_channel_fail2:
3156 	/* Free buffer blocks */
3157 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3158 		"==> nxge_map_rxdma_channel: free rx buffers"
3159 		"(nxgep 0x%x status 0x%x channel %d)",
3160 		nxgep, status, channel));
3161 	nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p);
3162 
3163 	status = NXGE_ERROR;
3164 
3165 nxge_map_rxdma_channel_exit:
3166 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3167 		"<== nxge_map_rxdma_channel: "
3168 		"(nxgep 0x%x status 0x%x channel %d)",
3169 		nxgep, status, channel));
3170 
3171 	return (status);
3172 }
3173 
3174 /*ARGSUSED*/
3175 static void
3176 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
3177     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
3178 {
3179 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3180 		"==> nxge_unmap_rxdma_channel (channel %d)", channel));
3181 
3182 	/*
3183 	 * unmap receive block ring, completion ring and mailbox.
3184 	 */
3185 	(void) nxge_unmap_rxdma_channel_cfg_ring(nxgep,
3186 			rcr_p, rx_mbox_p);
3187 
3188 	/* unmap buffer blocks */
3189 	(void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p);
3190 
3191 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel"));
3192 }
3193 
3194 /*ARGSUSED*/
3195 static nxge_status_t
3196 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
3197     p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p,
3198     p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
3199 {
3200 	p_rx_rbr_ring_t 	rbrp;
3201 	p_rx_rcr_ring_t 	rcrp;
3202 	p_rx_mbox_t 		mboxp;
3203 	p_nxge_dma_common_t 	cntl_dmap;
3204 	p_nxge_dma_common_t 	dmap;
3205 	p_rx_msg_t 		*rx_msg_ring;
3206 	p_rx_msg_t 		rx_msg_p;
3207 	p_rbr_cfig_a_t		rcfga_p;
3208 	p_rbr_cfig_b_t		rcfgb_p;
3209 	p_rcrcfig_a_t		cfga_p;
3210 	p_rcrcfig_b_t		cfgb_p;
3211 	p_rxdma_cfig1_t		cfig1_p;
3212 	p_rxdma_cfig2_t		cfig2_p;
3213 	p_rbr_kick_t		kick_p;
3214 	uint32_t		dmaaddrp;
3215 	uint32_t		*rbr_vaddrp;
3216 	uint32_t		bkaddr;
3217 	nxge_status_t		status = NXGE_OK;
3218 	int			i;
3219 	uint32_t 		nxge_port_rcr_size;
3220 
3221 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3222 		"==> nxge_map_rxdma_channel_cfg_ring"));
3223 
3224 	cntl_dmap = *dma_cntl_p;
3225 
3226 	/* Map in the receive block ring */
3227 	rbrp = *rbr_p;
3228 	dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc;
3229 	nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4);
3230 	/*
3231 	 * Zero out buffer block ring descriptors.
3232 	 */
3233 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
3234 
3235 	rcfga_p = &(rbrp->rbr_cfga);
3236 	rcfgb_p = &(rbrp->rbr_cfgb);
3237 	kick_p = &(rbrp->rbr_kick);
3238 	rcfga_p->value = 0;
3239 	rcfgb_p->value = 0;
3240 	kick_p->value = 0;
3241 	rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress;
3242 	rcfga_p->value = (rbrp->rbr_addr &
3243 				(RBR_CFIG_A_STDADDR_MASK |
3244 				RBR_CFIG_A_STDADDR_BASE_MASK));
3245 	rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT);
3246 
3247 	rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0;
3248 	rcfgb_p->bits.ldw.vld0 = 1;
3249 	rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1;
3250 	rcfgb_p->bits.ldw.vld1 = 1;
3251 	rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2;
3252 	rcfgb_p->bits.ldw.vld2 = 1;
3253 	rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code;
3254 
3255 	/*
3256 	 * For each buffer block, enter receive block address to the ring.
3257 	 */
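	/*
	 * Each RBR descriptor is a 32-bit word (note the element size
	 * of 4 passed to nxge_setup_dma_common() above) holding the
	 * block's DMA address shifted right by RBR_BKADDR_SHIFT; the
	 * hardware presumably shifts it back to form the full address.
	 */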
3258 	rbr_vaddrp = (uint32_t *)dmap->kaddrp;
3259 	rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp;
3260 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3261 		"==> nxge_map_rxdma_channel_cfg_ring: channel %d "
3262 		"rbr_vaddrp $%p", dma_channel, rbr_vaddrp));
3263 
3264 	rx_msg_ring = rbrp->rx_msg_ring;
3265 	for (i = 0; i < rbrp->tnblocks; i++) {
3266 		rx_msg_p = rx_msg_ring[i];
3267 		rx_msg_p->nxgep = nxgep;
3268 		rx_msg_p->rx_rbr_p = rbrp;
3269 		bkaddr = (uint32_t)
3270 			((rx_msg_p->buf_dma.dma_cookie.dmac_laddress
3271 				>> RBR_BKADDR_SHIFT));
3272 		rx_msg_p->free = B_FALSE;
3273 		rx_msg_p->max_usage_cnt = 0xbaddcafe; /* poison; set on 1st use */
3274 
3275 		*rbr_vaddrp++ = bkaddr;
3276 	}
3277 
3278 	kick_p->bits.ldw.bkadd = rbrp->rbb_max;
3279 	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
3280 
3281 	rbrp->rbr_rd_index = 0;
3282 
3283 	rbrp->rbr_consumed = 0;
3284 	rbrp->rbr_use_bcopy = B_TRUE;
3285 	rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0;
3286 	/*
3287 	 * Do bcopy on packets greater than bcopy size once
3288 	 * the lo threshold is reached.
3289 	 * This lo threshold should be less than the hi threshold.
3290 	 *
3291 	 * Do bcopy on every packet once the hi threshold is reached.
3292 	 */
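	/*
	 * Worked example (values illustrative): with rbb_max = 4096 and
	 * nxge_rx_threshold_hi = NXGE_RX_COPY_4, the switch below sets
	 * rbr_threshold_hi = 4096 * 4 / NXGE_RX_BCOPY_SCALE; once that
	 * many buffers are consumed, every packet is bcopied.
	 */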
3293 	if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) {
3294 		/* default it to use hi */
3295 		nxge_rx_threshold_lo = nxge_rx_threshold_hi;
3296 	}
3297 
3298 	if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) {
3299 		nxge_rx_buf_size_type = NXGE_RBR_TYPE2;
3300 	}
3301 	rbrp->rbr_bufsize_type = nxge_rx_buf_size_type;
3302 
3303 	switch (nxge_rx_threshold_hi) {
3304 	default:
3305 	case	NXGE_RX_COPY_NONE:
3306 		/* Do not do bcopy at all */
3307 		rbrp->rbr_use_bcopy = B_FALSE;
3308 		rbrp->rbr_threshold_hi = rbrp->rbb_max;
3309 		break;
3310 
3311 	case NXGE_RX_COPY_1:
3312 	case NXGE_RX_COPY_2:
3313 	case NXGE_RX_COPY_3:
3314 	case NXGE_RX_COPY_4:
3315 	case NXGE_RX_COPY_5:
3316 	case NXGE_RX_COPY_6:
3317 	case NXGE_RX_COPY_7:
3318 		rbrp->rbr_threshold_hi =
3319 			rbrp->rbb_max *
3320 			(nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE;
3321 		break;
3322 
3323 	case NXGE_RX_COPY_ALL:
3324 		rbrp->rbr_threshold_hi = 0;
3325 		break;
3326 	}
3327 
3328 	switch (nxge_rx_threshold_lo) {
3329 	default:
3330 	case	NXGE_RX_COPY_NONE:
3331 		/* Do not do bcopy at all */
3332 		if (rbrp->rbr_use_bcopy) {
3333 			rbrp->rbr_use_bcopy = B_FALSE;
3334 		}
3335 		rbrp->rbr_threshold_lo = rbrp->rbb_max;
3336 		break;
3337 
3338 	case NXGE_RX_COPY_1:
3339 	case NXGE_RX_COPY_2:
3340 	case NXGE_RX_COPY_3:
3341 	case NXGE_RX_COPY_4:
3342 	case NXGE_RX_COPY_5:
3343 	case NXGE_RX_COPY_6:
3344 	case NXGE_RX_COPY_7:
3345 		rbrp->rbr_threshold_lo =
3346 			rbrp->rbb_max *
3347 			(nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE;
3348 		break;
3349 
3350 	case NXGE_RX_COPY_ALL:
3351 		rbrp->rbr_threshold_lo = 0;
3352 		break;
3353 	}
3354 
3355 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
3356 		"nxge_map_rxdma_channel_cfg_ring: channel %d "
3357 		"rbb_max %d "
3358 		"rbrp->rbr_bufsize_type %d "
3359 		"rbb_threshold_hi %d "
3360 		"rbb_threshold_lo %d",
3361 		dma_channel,
3362 		rbrp->rbb_max,
3363 		rbrp->rbr_bufsize_type,
3364 		rbrp->rbr_threshold_hi,
3365 		rbrp->rbr_threshold_lo));
3366 
3367 	rbrp->page_valid.value = 0;
3368 	rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0;
3369 	rbrp->page_value_1.value = rbrp->page_value_2.value = 0;
3370 	rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0;
3371 	rbrp->page_hdl.value = 0;
3372 
3373 	rbrp->page_valid.bits.ldw.page0 = 1;
3374 	rbrp->page_valid.bits.ldw.page1 = 1;
3375 
3376 	/* Map in the receive completion ring */
3377 	rcrp = (p_rx_rcr_ring_t)
3378 		KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP);
3379 	rcrp->rdc = dma_channel;
3380 
3381 	nxge_port_rcr_size = nxgep->nxge_port_rcr_size;
3382 	rcrp->comp_size = nxge_port_rcr_size;
3383 	rcrp->comp_wrap_mask = nxge_port_rcr_size - 1;
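	/*
	 * Note: the mask form assumes nxge_port_rcr_size is a power of
	 * two, so nxge_rx_pkts() can advance the completion read index
	 * as (index + 1) & comp_wrap_mask (what NEXT_ENTRY() is
	 * presumed to do).
	 */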
3384 
3385 	rcrp->max_receive_pkts = nxge_max_rx_pkts;
3386 
3387 	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
3388 	nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size,
3389 			sizeof (rcr_entry_t));
3390 	rcrp->comp_rd_index = 0;
3391 	rcrp->comp_wt_index = 0;
3392 	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
3393 		(p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
3394 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
3395 #if defined(__i386)
3396 		(p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
3397 #else
3398 		(p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
3399 #endif
3400 
3401 	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
3402 			(nxge_port_rcr_size - 1);
3403 	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
3404 			(nxge_port_rcr_size - 1);
3405 
3406 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3407 		"==> nxge_map_rxdma_channel_cfg_ring: "
3408 		"channel %d "
3409 		"rbr_vaddrp $%p "
3410 		"rcr_desc_rd_head_p $%p "
3411 		"rcr_desc_rd_head_pp $%p "
3412 		"rcr_desc_rd_last_p $%p "
3413 		"rcr_desc_rd_last_pp $%p ",
3414 		dma_channel,
3415 		rbr_vaddrp,
3416 		rcrp->rcr_desc_rd_head_p,
3417 		rcrp->rcr_desc_rd_head_pp,
3418 		rcrp->rcr_desc_last_p,
3419 		rcrp->rcr_desc_last_pp));
3420 
3421 	/*
3422 	 * Zero out buffer block ring descriptors.
3423 	 */
3424 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
3425 	rcrp->intr_timeout = nxgep->intr_timeout;
3426 	rcrp->intr_threshold = nxgep->intr_threshold;
3427 	rcrp->full_hdr_flag = B_FALSE;
3428 	rcrp->sw_priv_hdr_len = 0;
3429 
3430 	cfga_p = &(rcrp->rcr_cfga);
3431 	cfgb_p = &(rcrp->rcr_cfgb);
3432 	cfga_p->value = 0;
3433 	cfgb_p->value = 0;
3434 	rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress;
3435 	cfga_p->value = (rcrp->rcr_addr &
3436 			    (RCRCFIG_A_STADDR_MASK |
3437 			    RCRCFIG_A_STADDR_BASE_MASK));
3438 
3439 	cfga_p->value |= ((uint64_t)rcrp->comp_size <<
3440 				RCRCFIG_A_LEN_SHIF);
3441 
3442 	/*
3443 	 * Timeout should be set based on the system clock divider.
3444 	 * The following timeout value of 1 assumes that the
3445 	 * granularity (1000) is 3 microseconds running at 300MHz.
3446 	 */
3447 	cfgb_p->bits.ldw.pthres = rcrp->intr_threshold;
3448 	cfgb_p->bits.ldw.timeout = rcrp->intr_timeout;
3449 	cfgb_p->bits.ldw.entout = 1;
3450 
3451 	/* Map in the mailbox */
3452 	mboxp = (p_rx_mbox_t)
3453 			KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP);
3454 	dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox;
3455 	nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t));
3456 	cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1;
3457 	cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2;
3458 	cfig1_p->value = cfig2_p->value = 0;
3459 
3460 	mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress;
3461 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3462 		"==> nxge_map_rxdma_channel_cfg_ring: "
3463 		"channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx",
3464 		dma_channel, cfig1_p->value, cfig2_p->value,
3465 		mboxp->mbox_addr));
3466 
3467 	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32
3468 			& 0xfff);
3469 	cfig1_p->bits.ldw.mbaddr_h = dmaaddrp;
3470 
3471 
3472 	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress &
3473 				RXDMA_CFIG2_MBADDR_L_MASK);
3475 
3476 	cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT);
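	/*
	 * Example of the mailbox address split (value illustrative):
	 * for dmac_laddress 0x0000012345678000, mbaddr_h above holds
	 * bits 43:32 (0x123), while mbaddr holds the low bits masked by
	 * RXDMA_CFIG2_MBADDR_L_MASK and shifted right by
	 * RXDMA_CFIG2_MBADDR_L_SHIFT.
	 */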
3477 
3478 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3479 		"==> nxge_map_rxdma_channel_cfg_ring: "
3480 		"channel %d damaddrp $%p "
3481 		"cfg1 0x%016llx cfig2 0x%016llx",
3482 		dma_channel, dmaaddrp,
3483 		cfig1_p->value, cfig2_p->value));
3484 
3485 	cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag;
3486 	cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len;
3487 
3488 	rbrp->rx_rcr_p = rcrp;
3489 	rcrp->rx_rbr_p = rbrp;
3490 	*rcr_p = rcrp;
3491 	*rx_mbox_p = mboxp;
3492 
3493 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3494 		"<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status));
3495 
3496 	return (status);
3497 }
3498 
3499 /*ARGSUSED*/
3500 static void
3501 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep,
3502     p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
3503 {
3504 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3505 		"==> nxge_unmap_rxdma_channel_cfg_ring: channel %d",
3506 		rcr_p->rdc));
3507 
3508 	KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t));
3509 	KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t));
3510 
3511 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3512 		"<== nxge_unmap_rxdma_channel_cfg_ring"));
3513 }
3514 
3515 static nxge_status_t
3516 nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
3517     p_nxge_dma_common_t *dma_buf_p,
3518     p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks)
3519 {
3520 	p_rx_rbr_ring_t 	rbrp;
3521 	p_nxge_dma_common_t 	dma_bufp, tmp_bufp;
3522 	p_rx_msg_t 		*rx_msg_ring;
3523 	p_rx_msg_t 		rx_msg_p;
3524 	p_mblk_t 		mblk_p;
3525 
3526 	rxring_info_t *ring_info;
3527 	nxge_status_t		status = NXGE_OK;
3528 	int			i, j, index;
3529 	uint32_t		size, bsize, nblocks, nmsgs;
3530 
3531 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3532 		"==> nxge_map_rxdma_channel_buf_ring: channel %d",
3533 		channel));
3534 
3535 	dma_bufp = tmp_bufp = *dma_buf_p;
3536 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3537 		" nxge_map_rxdma_channel_buf_ring: channel %d to map %d "
3538 		"chunks bufp 0x%016llx",
3539 		channel, num_chunks, dma_bufp));
3540 
3541 	nmsgs = 0;
3542 	for (i = 0; i < num_chunks; i++, tmp_bufp++) {
3543 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3544 			"==> nxge_map_rxdma_channel_buf_ring: channel %d "
3545 			"bufp 0x%016llx nblocks %d nmsgs %d",
3546 			channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
3547 		nmsgs += tmp_bufp->nblocks;
3548 	}
3549 	if (!nmsgs) {
3550 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3551 			"<== nxge_map_rxdma_channel_buf_ring: channel %d "
3552 			"no msg blocks",
3553 			channel));
3554 		status = NXGE_ERROR;
3555 		goto nxge_map_rxdma_channel_buf_ring_exit;
3556 	}
3557 
3558 	rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP);
3559 
3560 	size = nmsgs * sizeof (p_rx_msg_t);
3561 	rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
3562 	ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t),
3563 		KM_SLEEP);
3564 
3565 	MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER,
3566 				(void *)nxgep->interrupt_cookie);
3567 	MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER,
3568 				(void *)nxgep->interrupt_cookie);
3569 	rbrp->rdc = channel;
3570 	rbrp->num_blocks = num_chunks;
3571 	rbrp->tnblocks = nmsgs;
3572 	rbrp->rbb_max = nmsgs;
3573 	rbrp->rbr_max_size = nmsgs;
3574 	rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1);
3575 
3576 	/*
3577 	 * Buffer sizes suggested by NIU architect.
3578 	 * 256, 1K and 2K (2K grows to 4K/8K when jumbo is enabled).
3579 	 */
3580 
3581 	rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B;
3582 	rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES;
3583 	rbrp->npi_pkt_buf_size0 = SIZE_256B;
3584 
3585 	rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K;
3586 	rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES;
3587 	rbrp->npi_pkt_buf_size1 = SIZE_1KB;
3588 
3589 	rbrp->block_size = nxgep->rx_default_block_size;
3590 
3591 	if (!nxge_jumbo_enable && !nxgep->param_arr[param_accept_jumbo].value) {
3592 		rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K;
3593 		rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES;
3594 		rbrp->npi_pkt_buf_size2 = SIZE_2KB;
3595 	} else {
3596 		if (rbrp->block_size >= 0x2000) {
3597 			rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K;
3598 			rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES;
3599 			rbrp->npi_pkt_buf_size2 = SIZE_8KB;
3600 		} else {
3601 			rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K;
3602 			rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES;
3603 			rbrp->npi_pkt_buf_size2 = SIZE_4KB;
3604 		}
3605 	}
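	/*
	 * Resulting third buffer size, restating the branches above:
	 * jumbo off -> 2K; jumbo on and block_size >= 8K -> 8K;
	 * jumbo on otherwise -> 4K.
	 */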
3606 
3607 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3608 		"==> nxge_map_rxdma_channel_buf_ring: channel %d "
3609 		"actual rbr max %d rbb_max %d nmsgs %d "
3610 		"rbrp->block_size %d default_block_size %d "
3611 		"(config nxge_rbr_size %d nxge_rbr_spare_size %d)",
3612 		channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs,
3613 		rbrp->block_size, nxgep->rx_default_block_size,
3614 		nxge_rbr_size, nxge_rbr_spare_size));
3615 
3616 	/* Map in buffers from the buffer pool.  */
3617 	index = 0;
3618 	for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) {
3619 		bsize = dma_bufp->block_size;
3620 		nblocks = dma_bufp->nblocks;
3621 #if defined(__i386)
3622 		ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp;
3623 #else
3624 		ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp;
3625 #endif
3626 		ring_info->buffer[i].buf_index = i;
3627 		ring_info->buffer[i].buf_size = dma_bufp->alength;
3628 		ring_info->buffer[i].start_index = index;
3629 #if defined(__i386)
3630 		ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp;
3631 #else
3632 		ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp;
3633 #endif
3634 
3635 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3636 			" nxge_map_rxdma_channel_buf_ring: map channel %d "
3637 			"chunk %d"
3638 			" nblocks %d chunk_size %x block_size 0x%x "
3639 			"dma_bufp $%p", channel, i,
3640 			dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize,
3641 			dma_bufp));
3642 
3643 		for (j = 0; j < nblocks; j++) {
3644 			if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO,
3645 					dma_bufp)) == NULL) {
3646 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3647 					"allocb failed (index %d i %d j %d)",
3648 					index, i, j));
3649 				goto nxge_map_rxdma_channel_buf_ring_fail1;
3650 			}
3651 			rx_msg_ring[index] = rx_msg_p;
3652 			rx_msg_p->block_index = index;
3653 			rx_msg_p->shifted_addr = (uint32_t)
3654 				((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >>
3655 					    RBR_BKADDR_SHIFT));
3656 
3657 			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3658 				"index %d j %d rx_msg_p $%p mblk %p",
3659 				index, j, rx_msg_p, rx_msg_p->rx_mblk_p));
3660 
3661 			mblk_p = rx_msg_p->rx_mblk_p;
3662 			mblk_p->b_wptr = mblk_p->b_rptr + bsize;
3663 
3664 			rbrp->rbr_ref_cnt++;
3665 			index++;
3666 			rx_msg_p->buf_dma.dma_channel = channel;
3667 		}
3668 
3669 		rbrp->rbr_alloc_type = DDI_MEM_ALLOC;
3670 		if (dma_bufp->contig_alloc_type) {
3671 			rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC;
3672 		}
3673 
3674 		if (dma_bufp->kmem_alloc_type) {
3675 			rbrp->rbr_alloc_type = KMEM_ALLOC;
3676 		}
3677 
3678 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3679 		    " nxge_map_rxdma_channel_buf_ring: map channel %d "
3680 		    "chunk %d"
3681 		    " nblocks %d chunk_size %x block_size 0x%x "
3682 		    "dma_bufp $%p",
3683 		    channel, i,
3684 		    dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize,
3685 		    dma_bufp));
3686 	}
3687 	if (i < rbrp->num_blocks) {
3688 		goto nxge_map_rxdma_channel_buf_ring_fail1;
3689 	}
3690 
3691 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3692 		"nxge_map_rxdma_channel_buf_ring: done buf init "
3693 			"channel %d msg block entries %d",
3694 			channel, index));
3695 	ring_info->block_size_mask = bsize - 1;
3696 	rbrp->rx_msg_ring = rx_msg_ring;
3697 	rbrp->dma_bufp = dma_buf_p;
3698 	rbrp->ring_info = ring_info;
3699 
3700 	status = nxge_rxbuf_index_info_init(nxgep, rbrp);
3701 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3702 		" nxge_map_rxdma_channel_buf_ring: "
3703 		"channel %d done buf info init", channel));
3704 
3705 	/*
3706 	 * Finally, permit nxge_freeb() to call nxge_post_page().
3707 	 */
3708 	rbrp->rbr_state = RBR_POSTING;
3709 
3710 	*rbr_p = rbrp;
3711 	goto nxge_map_rxdma_channel_buf_ring_exit;
3712 
3713 nxge_map_rxdma_channel_buf_ring_fail1:
3714 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3715 		" nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)",
3716 		channel, status));
3717 
3718 	index--;
3719 	for (; index >= 0; index--) {
3720 		rx_msg_p = rx_msg_ring[index];
3721 		if (rx_msg_p != NULL) {
3722 			freeb(rx_msg_p->rx_mblk_p);
3723 			rx_msg_ring[index] = NULL;
3724 		}
3725 	}
3726 nxge_map_rxdma_channel_buf_ring_fail:
3727 	MUTEX_DESTROY(&rbrp->post_lock);
3728 	MUTEX_DESTROY(&rbrp->lock);
3729 	KMEM_FREE(ring_info, sizeof (rxring_info_t));
3730 	KMEM_FREE(rx_msg_ring, size);
3731 	KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t));
3732 
3733 	status = NXGE_ERROR;
3734 
3735 nxge_map_rxdma_channel_buf_ring_exit:
3736 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3737 		"<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status));
3738 
3739 	return (status);
3740 }
3741 
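/*
 * nxge_unmap_rxdma_channel_buf_ring
 *
 * Arguments:
 * 	nxgep
 * 	rbr_p	The receive buffer ring to tear down.
 *
 * Notes (summary inferred from the code below):
 *	Free every mblk in <rx_msg_ring> and destroy the ring mutexes.
 *	If no buffers are still loaned out (rbr_ref_cnt == 0), free the
 *	data buffers, the message ring, <ring_info> and <rbr_p> itself;
 *	otherwise mark the ring RBR_UNMAPPED so that the final
 *	nxge_freeb() call can free <rbr_p>.
 */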
3742 /*ARGSUSED*/
3743 static void
3744 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep,
3745     p_rx_rbr_ring_t rbr_p)
3746 {
3747 	p_rx_msg_t 		*rx_msg_ring;
3748 	p_rx_msg_t 		rx_msg_p;
3749 	rxring_info_t 		*ring_info;
3750 	int			i;
3751 	uint32_t		size;
3752 #ifdef	NXGE_DEBUG
3753 	int			num_chunks;
3754 #endif
3755 
3756 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3757 		"==> nxge_unmap_rxdma_channel_buf_ring"));
3758 	if (rbr_p == NULL) {
3759 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
3760 			"<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp"));
3761 		return;
3762 	}
3763 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3764 		"==> nxge_unmap_rxdma_channel_buf_ring: channel %d",
3765 		rbr_p->rdc));
3766 
3767 	rx_msg_ring = rbr_p->rx_msg_ring;
3768 	ring_info = rbr_p->ring_info;
3769 
3770 	if (rx_msg_ring == NULL || ring_info == NULL) {
3771 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3772 			"<== nxge_unmap_rxdma_channel_buf_ring: "
3773 			"rx_msg_ring $%p ring_info $%p",
3774 			rx_msg_ring, ring_info));
3775 		return;
3776 	}
3777 
3778 #ifdef	NXGE_DEBUG
3779 	num_chunks = rbr_p->num_blocks;
3780 #endif
3781 	size = rbr_p->tnblocks * sizeof (p_rx_msg_t);
3782 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3783 		" nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d "
3784 		"tnblocks %d (max %d) size ptrs %d ",
3785 		rbr_p->rdc, num_chunks,
3786 		rbr_p->tnblocks, rbr_p->rbr_max_size, size));
3787 
3788 	for (i = 0; i < rbr_p->tnblocks; i++) {
3789 		rx_msg_p = rx_msg_ring[i];
3790 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3791 			" nxge_unmap_rxdma_channel_buf_ring: "
3792 			"rx_msg_p $%p",
3793 			rx_msg_p));
3794 		if (rx_msg_p != NULL) {
3795 			freeb(rx_msg_p->rx_mblk_p);
3796 			rx_msg_ring[i] = NULL;
3797 		}
3798 	}
3799 
3800 	/*
3801 	 * We may no longer use the mutex <post_lock>. By setting
3802 	 * <rbr_state> to anything but POSTING, we prevent
3803 	 * nxge_post_page() from accessing a dead mutex.
3804 	 */
3805 	rbr_p->rbr_state = RBR_UNMAPPING;
3806 	MUTEX_DESTROY(&rbr_p->post_lock);
3807 
3808 	MUTEX_DESTROY(&rbr_p->lock);
3809 
3810 	if (rbr_p->rbr_ref_cnt == 0) {
3811 		/*
3812 		 * This is the normal state of affairs.
3813 		 * Need to free the following buffers:
3814 		 *  - data buffers
3815 		 *  - rx_msg ring
3816 		 *  - ring_info
3817 		 *  - rbr ring
3818 		 */
3819 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
3820 		    "unmap_rxdma_buf_ring: No outstanding - freeing "));
3821 		nxge_rxdma_databuf_free(rbr_p);
3822 		KMEM_FREE(ring_info, sizeof (rxring_info_t));
3823 		KMEM_FREE(rx_msg_ring, size);
3824 		KMEM_FREE(rbr_p, sizeof (*rbr_p));
3825 	} else {
3826 		/*
3827 		 * Some of our buffers are still being used.
3828 		 * Therefore, tell nxge_freeb() this ring is
3829 		 * unmapped, so it may free <rbr_p> for us.
3830 		 */
3831 		rbr_p->rbr_state = RBR_UNMAPPED;
3832 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3833 		    "unmap_rxdma_buf_ring: %d %s outstanding.",
3834 		    rbr_p->rbr_ref_cnt,
3835 		    rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs"));
3836 	}
3837 
3838 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3839 		"<== nxge_unmap_rxdma_channel_buf_ring"));
3840 }
3841 
3842 /*
3843  * nxge_rxdma_hw_start_common
3844  *
3845  * Arguments:
3846  * 	nxgep
3847  *
3848  * Notes:
3849  *
3850  * NPI/NXGE function calls:
3851  *	nxge_init_fzc_rx_common();
3852  *	nxge_init_fzc_rxdma_port();
3853  *
3854  * Registers accessed:
3855  *
3856  * Context:
3857  *	Service domain
3858  */
3859 static nxge_status_t
3860 nxge_rxdma_hw_start_common(p_nxge_t nxgep)
3861 {
3862 	nxge_status_t		status = NXGE_OK;
3863 
3864 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common"));
3865 
3866 	/*
3867 	 * Load the sharable parameters by writing to the
3868 	 * function zero control registers. These FZC registers
3869 	 * should be initialized only once for the entire chip.
3870 	 */
3871 	(void) nxge_init_fzc_rx_common(nxgep);
3872 
3873 	/*
3874 	 * Initialize the RXDMA port specific FZC control configurations.
3875 	 * These FZC registers are pertaining to each port.
3876 	 */
3877 	(void) nxge_init_fzc_rxdma_port(nxgep);
3878 
3879 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_start_common"));
3880 
3881 	return (status);
3882 }
3883 
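/*
 * nxge_rxdma_hw_start
 *
 * Arguments:
 * 	nxgep
 * 	channel	The channel to start.
 *
 * Notes (summary inferred from the code below):
 *	Look up the RBR, RCR and mailbox structures for <channel> and
 *	hand them to nxge_rxdma_start_channel(). Fails if the ring or
 *	mailbox pointers have not been set up yet.
 */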
3884 static nxge_status_t
3885 nxge_rxdma_hw_start(p_nxge_t nxgep, int channel)
3886 {
3887 	int			i, ndmas;
3888 	p_rx_rbr_rings_t 	rx_rbr_rings;
3889 	p_rx_rbr_ring_t		*rbr_rings;
3890 	p_rx_rcr_rings_t 	rx_rcr_rings;
3891 	p_rx_rcr_ring_t		*rcr_rings;
3892 	p_rx_mbox_areas_t 	rx_mbox_areas_p;
3893 	p_rx_mbox_t		*rx_mbox_p;
3894 	nxge_status_t		status = NXGE_OK;
3895 
3896 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start"));
3897 
3898 	rx_rbr_rings = nxgep->rx_rbr_rings;
3899 	rx_rcr_rings = nxgep->rx_rcr_rings;
3900 	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
3901 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
3902 			"<== nxge_rxdma_hw_start: NULL ring pointers"));
3903 		return (NXGE_ERROR);
3904 	}
3905 	ndmas = rx_rbr_rings->ndmas;
3906 	if (ndmas == 0) {
3907 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
3908 			"<== nxge_rxdma_hw_start: no dma channel allocated"));
3909 		return (NXGE_ERROR);
3910 	}
3911 
3912 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3913 		"==> nxge_rxdma_hw_start (ndmas %d)", ndmas));
3914 
3915 	rbr_rings = rx_rbr_rings->rbr_rings;
3916 	rcr_rings = rx_rcr_rings->rcr_rings;
3917 	rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
3918 	if (rx_mbox_areas_p == NULL)
3919 		return (NXGE_ERROR);
3920 	rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
3921 
3922 	i = channel;
3923 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3924 		"==> nxge_rxdma_hw_start (ndmas %d) channel %d",
3925 		ndmas, channel));
3926 	status = nxge_rxdma_start_channel(nxgep, channel,
3927 	    (p_rx_rbr_ring_t)rbr_rings[i],
3928 	    (p_rx_rcr_ring_t)rcr_rings[i],
3929 	    (p_rx_mbox_t)rx_mbox_p[i]);
3930 	if (status != NXGE_OK) {
3931 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3932 		    "==> nxge_rxdma_hw_start: disable "
3933 		    "(status 0x%x channel %d)", status, channel));
3934 		return (status);
3935 	}
3936 
3937 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: "
3938 		"rx_rbr_rings 0x%016llx rings 0x%016llx",
3939 		rx_rbr_rings, rx_rcr_rings));
3940 
3941 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3942 		"==> nxge_rxdma_hw_start: (status 0x%x)", status));
3943 
3944 	return (status);
3945 }
3946 
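/*
 * nxge_rxdma_hw_stop
 *
 * Arguments:
 * 	nxgep
 * 	channel	The channel to stop.
 *
 * Notes (summary inferred from the code below):
 *	A thin wrapper that validates the ring pointers and calls
 *	nxge_rxdma_stop_channel().
 */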
3947 static void
3948 nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel)
3949 {
3950 	p_rx_rbr_rings_t 	rx_rbr_rings;
3951 	p_rx_rcr_rings_t 	rx_rcr_rings;
3952 
3953 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop"));
3954 
3955 	rx_rbr_rings = nxgep->rx_rbr_rings;
3956 	rx_rcr_rings = nxgep->rx_rcr_rings;
3957 	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
3958 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
3959 			"<== nxge_rxdma_hw_stop: NULL ring pointers"));
3960 		return;
3961 	}
3962 
3963 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3964 		"==> nxge_rxdma_hw_stop(channel %d)",
3965 		channel));
3966 	(void) nxge_rxdma_stop_channel(nxgep, channel);
3967 
3968 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: "
3969 		"rx_rbr_rings 0x%016llx rings 0x%016llx",
3970 		rx_rbr_rings, rx_rcr_rings));
3971 
3972 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop"));
3973 }
3974 
3975 
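/*
 * nxge_rxdma_start_channel
 *
 * Arguments:
 * 	nxgep
 * 	channel	The channel to start.
 * 	rbr_p, rcr_p, mbox_p	The rings and mailbox for this channel.
 *
 * Notes (summary inferred from the code below):
 *	Reset the channel and program its FZC registers (service domain
 *	only), set the interrupt event masks, initialize the control
 *	and status register, then load and enable the DMA channel via
 *	nxge_enable_rxdma_channel(). In a guest domain, also add the
 *	channel's interrupt handler.
 *
 * Context:
 *	Service domain or guest domain (see the isLDOMguest() checks).
 */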
3976 static nxge_status_t
3977 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel,
3978     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
3980 {
3981 	npi_handle_t		handle;
3982 	npi_status_t		rs = NPI_SUCCESS;
3983 	rx_dma_ctl_stat_t	cs;
3984 	rx_dma_ent_msk_t	ent_mask;
3985 	nxge_status_t		status = NXGE_OK;
3986 
3987 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel"));
3988 
3989 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
3990 
3991 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: "
3992 		"npi handle addr $%p acc $%p",
3993 		nxgep->npi_handle.regp, nxgep->npi_handle.regh));
3994 
3995 	/* Reset RXDMA channel, but not if you're a guest. */
3996 	if (!isLDOMguest(nxgep)) {
3997 		rs = npi_rxdma_cfg_rdc_reset(handle, channel);
3998 		if (rs != NPI_SUCCESS) {
3999 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4000 			    "==> nxge_init_fzc_rdc: "
4001 			    "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x",
4002 			    channel, rs));
4003 			return (NXGE_ERROR | rs);
4004 		}
4005 
4006 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4007 		    "==> nxge_rxdma_start_channel: reset done: channel %d",
4008 		    channel));
4009 	}
4010 
4011 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
4012 	if (isLDOMguest(nxgep))
4013 		(void) nxge_rdc_lp_conf(nxgep, channel);
4014 #endif
4015 
4016 	/*
4017 	 * Initialize the RXDMA channel specific FZC control
4018 	 * configurations. These FZC registers pertain to
4019 	 * each RX channel (logical pages).
4020 	 */
4021 	if (!isLDOMguest(nxgep)) {
4022 		status = nxge_init_fzc_rxdma_channel(nxgep, channel);
4023 		if (status != NXGE_OK) {
4024 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4025 				"==> nxge_rxdma_start_channel: "
4026 				"init fzc rxdma failed (0x%08x channel %d)",
4027 				status, channel));
4028 			return (status);
4029 		}
4030 
4031 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4032 			"==> nxge_rxdma_start_channel: fzc done"));
4033 	}
4034 
4035 	/* Set up the interrupt event masks. */
4036 	ent_mask.value = 0;
4037 	ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK;
4038 	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
4039 	    &ent_mask);
4040 	if (rs != NPI_SUCCESS) {
4041 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4042 			"==> nxge_rxdma_start_channel: "
4043 			"init rxdma event masks failed "
4044 			"(0x%08x channel %d)",
4045 			rs, channel));
4046 		return (NXGE_ERROR | rs);
4047 	}
4048 
4049 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4050 		"==> nxge_rxdma_start_channel: "
4051 		"event done: channel %d (mask 0x%016llx)",
4052 		channel, ent_mask.value));
4053 
4054 	/* Initialize the receive DMA control and status register */
4055 	cs.value = 0;
4056 	cs.bits.hdw.mex = 1;
4057 	cs.bits.hdw.rcrthres = 1;
4058 	cs.bits.hdw.rcrto = 1;
4059 	cs.bits.hdw.rbr_empty = 1;
4060 	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
4061 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4062 		"channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value));
4063 	if (status != NXGE_OK) {
4064 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4065 			"==> nxge_rxdma_start_channel: "
4066 			"init rxdma control register failed (0x%08x channel %d",
4067 			status, channel));
4068 		return (status);
4069 	}
4070 
4071 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4072 		"control done - channel %d cs 0x%016llx", channel, cs.value));
4073 
4074 	/*
4075 	 * Load the RXDMA descriptors, buffers and mailbox,
4076 	 * then initialize the receive DMA channel and
4077 	 * enable it.
4078 	 */
4079 	status = nxge_enable_rxdma_channel(nxgep,
4080 	    channel, rbr_p, rcr_p, mbox_p);
4081 
4082 	if (status != NXGE_OK) {
4083 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4084 		    " nxge_rxdma_start_channel: "
4085 		    " enable rxdma failed (0x%08x channel %d)",
4086 		    status, channel));
4087 		return (status);
4088 	}
4089 
4090 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4091 	    "==> nxge_rxdma_start_channel: enabled channel %d"));
4092 
4093 	if (isLDOMguest(nxgep)) {
4094 		/* Add interrupt handler for this channel. */
4095 		if (nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel)
4096 		    != NXGE_OK) {
4097 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4098 			    " nxge_rxdma_start_channel: "
4099 			    " nxge_hio_intr_add failed (0x%08x channel %d)",
4100 		    status, channel));
4101 		}
4102 	}
4103 
4104 	ent_mask.value = 0;
4105 	ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK |
4106 				RX_DMA_ENT_MSK_PTDROP_PKT_MASK);
4107 	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
4108 			&ent_mask);
4109 	if (rs != NPI_SUCCESS) {
4110 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4111 			"==> nxge_rxdma_start_channel: "
4112 			"init rxdma event masks failed (0x%08x channel %d)",
4113 			rs, channel));
4114 		return (NXGE_ERROR | rs);
4115 	}
4116 
4117 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4118 		"event mask done - channel %d mask 0x%016llx", channel, ent_mask.value));
4119 
4120 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel"));
4121 
4122 	return (NXGE_OK);
4123 }
4124 
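/*
 * nxge_rxdma_stop_channel
 *
 * Arguments:
 * 	nxgep
 * 	channel	The channel to stop.
 *
 * Notes (summary inferred from the code below):
 *	Reset the channel, mask all of its events, clear the control
 *	and status register, and finally disable the DMA channel via
 *	nxge_disable_rxdma_channel().
 */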
4125 static nxge_status_t
4126 nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
4127 {
4128 	npi_handle_t		handle;
4129 	npi_status_t		rs = NPI_SUCCESS;
4130 	rx_dma_ctl_stat_t	cs;
4131 	rx_dma_ent_msk_t	ent_mask;
4132 	nxge_status_t		status = NXGE_OK;
4133 
4134 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel"));
4135 
4136 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
4137 
4138 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: "
4139 		"npi handle addr $%p acc $%p",
4140 		nxgep->npi_handle.regp, nxgep->npi_handle.regh));
4141 
4142 	/* Reset RXDMA channel */
4143 	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
4144 	if (rs != NPI_SUCCESS) {
4145 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4146 			    " nxge_rxdma_stop_channel: "
4147 			    " reset rxdma failed (0x%08x channel %d)",
4148 			    rs, channel));
4149 		return (NXGE_ERROR | rs);
4150 	}
4151 
4152 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
4153 		"==> nxge_rxdma_stop_channel: reset done"));
4154 
4155 	/* Set up the interrupt event masks. */
4156 	ent_mask.value = RX_DMA_ENT_MSK_ALL;
4157 	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
4158 			&ent_mask);
4159 	if (rs != NPI_SUCCESS) {
4160 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4161 			    "==> nxge_rxdma_stop_channel: "
4162 			    "set rxdma event masks failed (0x%08x channel %d)",
4163 			    rs, channel));
4164 		return (NXGE_ERROR | rs);
4165 	}
4166 
4167 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
4168 		"==> nxge_rxdma_stop_channel: event done"));
4169 
4170 	/* Initialize the receive DMA control and status register */
4171 	cs.value = 0;
4172 	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel,
4173 			&cs);
4174 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control "
4175 		"set to default (all 0s) 0x%08x", cs.value));
4176 	if (status != NXGE_OK) {
4177 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4178 			    " nxge_rxdma_stop_channel: init rxdma"
4179 			    " control register failed (0x%08x channel %d",
4180 			status, channel));
4181 		return (status);
4182 	}
4183 
4184 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
4185 		"==> nxge_rxdma_stop_channel: control done"));
4186 
4187 	/* disable dma channel */
4188 	status = nxge_disable_rxdma_channel(nxgep, channel);
4189 
4190 	if (status != NXGE_OK) {
4191 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4192 			    " nxge_rxdma_stop_channel: "
4193 			    " init enable rxdma failed (0x%08x channel %d)",
4194 			    status, channel));
4195 		return (status);
4196 	}
4197 
4198 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
4199 		"==> nxge_rxdma_stop_channel: disable done"));
4200 
4201 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel"));
4202 
4203 	return (NXGE_OK);
4204 }
4205 
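/*
 * nxge_rxdma_handle_sys_errors
 *
 * Arguments:
 * 	nxgep
 *
 * Notes (summary inferred from the code below):
 *	Read the RX control/data FIFO status, record an ID mismatch if
 *	one is flagged, and check the ZCP/IPP EOP error bits against
 *	this port. If this port is implicated, hand the error off to
 *	nxge_rxdma_handle_port_errors().
 */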
4206 nxge_status_t
4207 nxge_rxdma_handle_sys_errors(p_nxge_t nxgep)
4208 {
4209 	npi_handle_t		handle;
4210 	p_nxge_rdc_sys_stats_t	statsp;
4211 	rx_ctl_dat_fifo_stat_t	stat;
4212 	uint32_t		zcp_err_status;
4213 	uint32_t		ipp_err_status;
4214 	nxge_status_t		status = NXGE_OK;
4215 	npi_status_t		rs = NPI_SUCCESS;
4216 	boolean_t		my_err = B_FALSE;
4217 
4218 	handle = nxgep->npi_handle;
4219 	statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;
4220 
4221 	rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat);
4222 
4223 	if (rs != NPI_SUCCESS)
4224 		return (NXGE_ERROR | rs);
4225 
4226 	if (stat.bits.ldw.id_mismatch) {
4227 		statsp->id_mismatch++;
4228 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
4229 					NXGE_FM_EREPORT_RDMC_ID_MISMATCH);
4230 		/* Global fatal error encountered */
4231 	}
4232 
4233 	if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) {
4234 		switch (nxgep->mac.portnum) {
4235 		case 0:
4236 			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) ||
4237 				(stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) {
4238 				my_err = B_TRUE;
4239 				zcp_err_status = stat.bits.ldw.zcp_eop_err;
4240 				ipp_err_status = stat.bits.ldw.ipp_eop_err;
4241 			}
4242 			break;
4243 		case 1:
4244 			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) ||
4245 				(stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) {
4246 				my_err = B_TRUE;
4247 				zcp_err_status = stat.bits.ldw.zcp_eop_err;
4248 				ipp_err_status = stat.bits.ldw.ipp_eop_err;
4249 			}
4250 			break;
4251 		case 2:
4252 			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) ||
4253 				(stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) {
4254 				my_err = B_TRUE;
4255 				zcp_err_status = stat.bits.ldw.zcp_eop_err;
4256 				ipp_err_status = stat.bits.ldw.ipp_eop_err;
4257 			}
4258 			break;
4259 		case 3:
4260 			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) ||
4261 				(stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) {
4262 				my_err = B_TRUE;
4263 				zcp_err_status = stat.bits.ldw.zcp_eop_err;
4264 				ipp_err_status = stat.bits.ldw.ipp_eop_err;
4265 			}
4266 			break;
4267 		default:
4268 			return (NXGE_ERROR);
4269 		}
4270 	}
4271 
4272 	if (my_err) {
4273 		status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status,
4274 							zcp_err_status);
4275 		if (status != NXGE_OK)
4276 			return (status);
4277 	}
4278 
4279 	return (NXGE_OK);
4280 }
4281 
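/*
 * nxge_rxdma_handle_port_errors
 *
 * Arguments:
 * 	nxgep
 * 	ipp_status, zcp_status	Per-port EOP error bit masks.
 *
 * Notes (summary inferred from the code below):
 *	Bump the IPP/ZCP EOP error statistics and post the matching FMA
 *	ereports. Either error is treated as fatal to the RX port, in
 *	which case nxge_rx_port_fatal_err_recover() is attempted.
 */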
4282 static nxge_status_t
4283 nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status,
4284 							uint32_t zcp_status)
4285 {
4286 	boolean_t		rxport_fatal = B_FALSE;
4287 	p_nxge_rdc_sys_stats_t	statsp;
4288 	nxge_status_t		status = NXGE_OK;
4289 	uint8_t			portn;
4290 
4291 	portn = nxgep->mac.portnum;
4292 	statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;
4293 
4294 	if (ipp_status & (0x1 << portn)) {
4295 		statsp->ipp_eop_err++;
4296 		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
4297 					NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR);
4298 		rxport_fatal = B_TRUE;
4299 	}
4300 
4301 	if (zcp_status & (0x1 << portn)) {
4302 		statsp->zcp_eop_err++;
4303 		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
4304 					NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR);
4305 		rxport_fatal = B_TRUE;
4306 	}
4307 
4308 	if (rxport_fatal) {
4309 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4310 			    " nxge_rxdma_handle_port_error: "
4311 			    " fatal error on Port #%d\n",
4312 				portn));
4313 		status = nxge_rx_port_fatal_err_recover(nxgep);
4314 		if (status == NXGE_OK) {
4315 			FM_SERVICE_RESTORED(nxgep);
4316 		}
4317 	}
4318 
4319 	return (status);
4320 }
4321 
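/*
 * nxge_rxdma_fatal_err_recover
 *
 * Arguments:
 * 	nxgep
 * 	channel	The channel to recover.
 *
 * Notes (summary inferred from the code below):
 *	With all three ring locks held: disable, mask and reset the
 *	channel, re-initialize the RBR/RCR indexes, zero the completion
 *	ring, reclaim any buffers whose use counts show them to be
 *	free, and restart the channel with nxge_rxdma_start_channel().
 */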
4322 static nxge_status_t
4323 nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel)
4324 {
4325 	npi_handle_t		handle;
4326 	npi_status_t		rs = NPI_SUCCESS;
4327 	nxge_status_t		status = NXGE_OK;
4328 	p_rx_rbr_ring_t		rbrp;
4329 	p_rx_rcr_ring_t		rcrp;
4330 	p_rx_mbox_t		mboxp;
4331 	rx_dma_ent_msk_t	ent_mask;
4332 	p_nxge_dma_common_t	dmap;
4333 	int			ring_idx;
4334 	uint32_t		ref_cnt;
4335 	p_rx_msg_t		rx_msg_p;
4336 	int			i;
4337 	uint32_t		nxge_port_rcr_size;
4338 
4339 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover"));
4340 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4341 			"Recovering from RxDMAChannel#%d error...", channel));
4342 
4343 	/*
4344 	 * Stop the DMA channel and wait for the stop done bit.
4345 	 * If the stop done bit does not get set, then flag
4346 	 * an error.
4347 	 */
4348 
4349 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
4350 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop..."));
4351 
4352 	ring_idx = nxge_rxdma_get_ring_index(nxgep, channel);
4353 	rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[ring_idx];
4354 	rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[ring_idx];
4355 
4356 	MUTEX_ENTER(&rcrp->lock);
4357 	MUTEX_ENTER(&rbrp->lock);
4358 	MUTEX_ENTER(&rbrp->post_lock);
4359 
4360 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel..."));
4361 
4362 	rs = npi_rxdma_cfg_rdc_disable(handle, channel);
4363 	if (rs != NPI_SUCCESS) {
4364 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4365 			"nxge_disable_rxdma_channel:failed"));
4366 		goto fail;
4367 	}
4368 
4369 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt..."));
4370 
4371 	/* Disable interrupt */
4372 	ent_mask.value = RX_DMA_ENT_MSK_ALL;
4373 	rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
4374 	if (rs != NPI_SUCCESS) {
4375 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4376 				"nxge_rxdma_stop_channel: "
4377 				"set rxdma event masks failed (channel %d)",
4378 				channel));
4379 	}
4380 
4381 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset..."));
4382 
4383 	/* Reset RXDMA channel */
4384 	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
4385 	if (rs != NPI_SUCCESS) {
4386 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4387 			"nxge_rxdma_fatal_err_recover: "
4388 				" reset rxdma failed (channel %d)", channel));
4389 		goto fail;
4390 	}
4391 
4392 	nxge_port_rcr_size = nxgep->nxge_port_rcr_size;
4393 
4394 	mboxp = (p_rx_mbox_t)
4395 	    nxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx];
4396 
4397 	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
4398 	rbrp->rbr_rd_index = 0;
4399 
4400 	rcrp->comp_rd_index = 0;
4401 	rcrp->comp_wt_index = 0;
4402 	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
4403 		(p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
4404 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
4405 #if defined(__i386)
4406 		(p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
4407 #else
4408 		(p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
4409 #endif
4410 
4411 	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
4412 		(nxge_port_rcr_size - 1);
4413 	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
4414 		(nxge_port_rcr_size - 1);
4415 
4416 	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
4417 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
4418 
4419 	cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size);
4420 
4421 	for (i = 0; i < rbrp->rbr_max_size; i++) {
4422 		rx_msg_p = rbrp->rx_msg_ring[i];
4423 		ref_cnt = rx_msg_p->ref_cnt;
4424 		if (ref_cnt != 1) {
4425 			if (rx_msg_p->cur_usage_cnt !=
4426 					rx_msg_p->max_usage_cnt) {
4427 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4428 						"buf[%d]: cur_usage_cnt = %d "
4429 						"max_usage_cnt = %d\n", i,
4430 						rx_msg_p->cur_usage_cnt,
4431 						rx_msg_p->max_usage_cnt));
4432 			} else {
4433 				/* Buffer can be re-posted */
4434 				rx_msg_p->free = B_TRUE;
4435 				rx_msg_p->cur_usage_cnt = 0;
4436 				rx_msg_p->max_usage_cnt = 0xbaddcafe;
4437 				rx_msg_p->pkt_buf_size = 0;
4438 			}
4439 		}
4440 	}
4441 
4442 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start..."));
4443 
4444 	status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp);
4445 	if (status != NXGE_OK) {
4446 		goto fail;
4447 	}
4448 
4449 	MUTEX_EXIT(&rbrp->post_lock);
4450 	MUTEX_EXIT(&rbrp->lock);
4451 	MUTEX_EXIT(&rcrp->lock);
4452 
4453 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4454 			"Recovery Successful, RxDMAChannel#%d Restored",
4455 			channel));
4456 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover"));
4457 
4458 	return (NXGE_OK);
4459 fail:
4460 	MUTEX_EXIT(&rbrp->post_lock);
4461 	MUTEX_EXIT(&rbrp->lock);
4462 	MUTEX_EXIT(&rcrp->lock);
4463 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
4464 
4465 	return (NXGE_ERROR | rs);
4466 }
4467 
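/*
 * nxge_rx_port_fatal_err_recover
 *
 * Arguments:
 * 	nxgep
 *
 * Notes (summary inferred from the code below):
 *	Disable the RxMAC, run nxge_rxdma_fatal_err_recover() on every
 *	RDC this instance owns, then reset and re-initialize the IPP
 *	and the RxMAC before re-enabling the RxMAC.
 */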
4468 nxge_status_t
4469 nxge_rx_port_fatal_err_recover(p_nxge_t nxgep)
4470 {
4471 	nxge_grp_set_t *set = &nxgep->rx_set;
4472 	nxge_status_t status = NXGE_OK;
4473 	int rdc;
4474 
4475 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_port_fatal_err_recover"));
4476 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4477 				"Recovering from RxPort error..."));
4478 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n"));
4479 
4480 	if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
4481 		goto fail;
4482 
4483 	NXGE_DELAY(1000);
4484 
4485 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stopping all RxDMA channels..."));
4486 
4487 	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
4488 		if ((1 << rdc) & set->owned.map) {
4489 			if (nxge_rxdma_fatal_err_recover(nxgep, rdc)
4490 			    != NXGE_OK) {
4491 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4492 				    "Could not recover channel %d", rdc));
4493 			}
4494 		}
4495 	}
4496 
4497 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP..."));
4498 
4499 	/* Reset IPP */
4500 	if (nxge_ipp_reset(nxgep) != NXGE_OK) {
4501 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4502 			"nxge_rx_port_fatal_err_recover: "
4503 			"Failed to reset IPP"));
4504 		goto fail;
4505 	}
4506 
4507 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC..."));
4508 
4509 	/* Reset RxMAC */
4510 	if (nxge_rx_mac_reset(nxgep) != NXGE_OK) {
4511 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4512 			"nxge_rx_port_fatal_err_recover: "
4513 			"Failed to reset RxMAC"));
4514 		goto fail;
4515 	}
4516 
4517 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP..."));
4518 
4519 	/* Re-Initialize IPP */
4520 	if (nxge_ipp_init(nxgep) != NXGE_OK) {
4521 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4522 			"nxge_rx_port_fatal_err_recover: "
4523 			"Failed to init IPP"));
4524 		goto fail;
4525 	}
4526 
4527 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC..."));
4528 
4529 	/* Re-Initialize RxMAC */
4530 	if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) {
4531 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4532 			"nxge_rx_port_fatal_err_recover: "
4533 			"Failed to reset RxMAC"));
4534 		goto fail;
4535 	}
4536 
4537 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC..."));
4538 
4539 	/* Re-enable RxMAC */
4540 	if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) {
4541 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4542 			"nxge_rx_port_fatal_err_recover: "
4543 			"Failed to enable RxMAC"));
4544 		goto fail;
4545 	}
4546 
4547 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4548 			"Recovery Successful, RxPort Restored"));
4549 
4550 	return (NXGE_OK);
4551 fail:
4552 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
4553 	return (status);
4554 }
4555 
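/*
 * nxge_rxdma_inject_err
 *
 * Arguments:
 * 	nxgep
 * 	err_id	The NXGE_FM_EREPORT_RDMC_* error to inject.
 * 	chan	The channel to inject it on.
 *
 * Notes (summary inferred from the code below):
 *	Fault-injection support: set the bit corresponding to <err_id>
 *	in either the per-channel RX_DMA_CTL_STAT_DBG register or the
 *	per-port RX_CTL_DAT_FIFO_STAT_DBG register so that the normal
 *	error paths can be exercised.
 */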
4556 void
4557 nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
4558 {
4559 	rx_dma_ctl_stat_t	cs;
4560 	rx_ctl_dat_fifo_stat_t	cdfs;
4561 
4562 	switch (err_id) {
4563 	case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR:
4564 	case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR:
4565 	case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR:
4566 	case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR:
4567 	case NXGE_FM_EREPORT_RDMC_RBR_TMOUT:
4568 	case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR:
4569 	case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS:
4570 	case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR:
4571 	case NXGE_FM_EREPORT_RDMC_RCRINCON:
4572 	case NXGE_FM_EREPORT_RDMC_RCRFULL:
4573 	case NXGE_FM_EREPORT_RDMC_RBRFULL:
4574 	case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE:
4575 	case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE:
4576 	case NXGE_FM_EREPORT_RDMC_CONFIG_ERR:
4577 		RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
4578 			chan, &cs.value);
4579 		if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR)
4580 			cs.bits.hdw.rcr_ack_err = 1;
4581 		else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR)
4582 			cs.bits.hdw.dc_fifo_err = 1;
4583 		else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR)
4584 			cs.bits.hdw.rcr_sha_par = 1;
4585 		else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR)
4586 			cs.bits.hdw.rbr_pre_par = 1;
4587 		else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT)
4588 			cs.bits.hdw.rbr_tmout = 1;
4589 		else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR)
4590 			cs.bits.hdw.rsp_cnt_err = 1;
4591 		else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS)
4592 			cs.bits.hdw.byte_en_bus = 1;
4593 		else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR)
4594 			cs.bits.hdw.rsp_dat_err = 1;
4595 		else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR)
4596 			cs.bits.hdw.config_err = 1;
4597 		else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON)
4598 			cs.bits.hdw.rcrincon = 1;
4599 		else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL)
4600 			cs.bits.hdw.rcrfull = 1;
4601 		else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL)
4602 			cs.bits.hdw.rbrfull = 1;
4603 		else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE)
4604 			cs.bits.hdw.rbrlogpage = 1;
4605 		else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE)
4606 			cs.bits.hdw.cfiglogpage = 1;
4607 #if defined(__i386)
4608 		cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n",
4609 				cs.value);
4610 #else
4611 		cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n",
4612 				cs.value);
4613 #endif
4614 		RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
4615 			chan, cs.value);
4616 		break;
4617 	case NXGE_FM_EREPORT_RDMC_ID_MISMATCH:
4618 	case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR:
4619 	case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR:
4620 		cdfs.value = 0;
4621 		if (err_id ==  NXGE_FM_EREPORT_RDMC_ID_MISMATCH)
4622 			cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum);
4623 		else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR)
4624 			cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum);
4625 		else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR)
4626 			cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum);
4627 #if defined(__i386)
4628 		cmn_err(CE_NOTE,
4629 			"!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
4630 			cdfs.value);
4631 #else
4632 		cmn_err(CE_NOTE,
4633 			"!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
4634 			cdfs.value);
4635 #endif
4636 		NXGE_REG_WR64(nxgep->npi_handle,
4637 		    RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value);
4638 		break;
4639 	case NXGE_FM_EREPORT_RDMC_DCF_ERR:
4640 		break;
4641 	case NXGE_FM_EREPORT_RDMC_RCR_ERR:
4642 		break;
4643 	}
4644 }
4645 
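/*
 * nxge_rxdma_databuf_free
 *
 * Arguments:
 * 	rbr_p	The receive buffer ring whose data buffers to free.
 *
 * Notes (summary inferred from the code below):
 *	Free each buffer chunk recorded in <ring_info> via
 *	nxge_free_buf(). DDI-allocated buffers are not handled here;
 *	presumably they are released by the generic DMA buffer
 *	teardown path instead.
 */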
4646 static void
4647 nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p)
4648 {
4649 	rxring_info_t 		*ring_info;
4650 	int			index;
4651 	uint32_t		chunk_size;
4652 	uint64_t		kaddr;
4653 	uint_t			num_blocks;
4654 
4655 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free"));
4656 
4657 	if (rbr_p == NULL) {
4658 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4659 		    "==> nxge_rxdma_databuf_free: NULL rbr pointer"));
4660 		return;
4661 	}
4662 
4663 	if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) {
4664 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4665 		    "==> nxge_rxdma_databuf_free: DDI"));
4666 		return;
4667 	}
4668 
4669 	ring_info = rbr_p->ring_info;
4670 	if (ring_info == NULL) {
4671 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4672 		    "==> nxge_rxdma_databuf_free: NULL ring info"));
4673 		return;
4674 	}
4675 	num_blocks = rbr_p->num_blocks;
4676 	for (index = 0; index < num_blocks; index++) {
4677 		kaddr = ring_info->buffer[index].kaddr;
4678 		chunk_size = ring_info->buffer[index].buf_size;
4679 		NXGE_DEBUG_MSG((NULL, DMA_CTL,
4680 		    "==> nxge_rxdma_databuf_free: free chunk %d "
4681 		    "kaddrp $%p chunk size %d",
4682 		    index, kaddr, chunk_size));
4683 		if (kaddr == 0)
4684 			continue;
4685 		nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size);
4686 		ring_info->buffer[index].kaddr = 0;
4686 	}
4687 
4688 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free"));
4689 }
4690 
4691 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
4692 extern void contig_mem_free(void *, size_t);
4693 #endif
4694 
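/*
 * nxge_free_buf
 *
 * Arguments:
 * 	alloc_type	How the buffer was allocated.
 * 	kaddr		Kernel virtual address of the buffer.
 * 	buf_size	Size of the buffer in bytes.
 *
 * Notes (summary inferred from the code below):
 *	Release a single buffer chunk using the allocator that created
 *	it: KMEM_FREE() for kmem buffers, or contig_mem_free() for
 *	contiguous memory on sun4v with the NIU logical-page workaround.
 */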
4695 void
4696 nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size)
4697 {
4698 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf"));
4699 
4700 	if (kaddr == 0 || buf_size == 0) {
4701 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4702 		    "==> nxge_free_buf: invalid kaddr $%p size to free %d",
4703 		    kaddr, buf_size));
4704 		return;
4705 	}
4706 
4707 	switch (alloc_type) {
4708 	case KMEM_ALLOC:
4709 		NXGE_DEBUG_MSG((NULL, DMA_CTL,
4710 		    "==> nxge_free_buf: freeing kmem $%p size %d",
4711 		    kaddr, buf_size));
4712 #if defined(__i386)
4713 		KMEM_FREE((void *)(uint32_t)kaddr, buf_size);
4714 #else
4715 		KMEM_FREE((void *)kaddr, buf_size);
4716 #endif
4717 		break;
4718 
4719 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
4720 	case CONTIG_MEM_ALLOC:
4721 		NXGE_DEBUG_MSG((NULL, DMA_CTL,
4722 		    "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d",
4723 		    kaddr, buf_size));
4724 		contig_mem_free((void *)kaddr, buf_size);
4725 		break;
4726 #endif
4727 
4728 	default:
4729 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4730 		    "<== nxge_free_buf: unsupported alloc type %d",
4731 		    alloc_type));
4732 		return;
4733 	}
4734 
4735 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf"));
4736 }
4737