xref: /titanic_50/usr/src/uts/common/io/nxge/nxge_rxdma.c (revision 48bc00d6814e04ff3edb32cafe7d1bc580baff68)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/nxge/nxge_impl.h>
28 #include <sys/nxge/nxge_rxdma.h>
29 #include <sys/nxge/nxge_hio.h>
30 
31 #if !defined(_BIG_ENDIAN)
32 #include <npi_rx_rd32.h>
33 #endif
34 #include <npi_rx_rd64.h>
35 #include <npi_rx_wr64.h>
36 
37 #define	NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp)	\
38 	(rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid)
39 #define	NXGE_ACTUAL_RDC(nxgep, rdc)	\
40 	(rdc + nxgep->pt_config.hw_config.start_rdc)
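/*
 * Illustration (values are hypothetical): if a port's partition has
 * def_mac_rxdma_grpid == 2 and start_rdc == 8, the logical
 * (partition-relative) indices map to hardware indices as
 *
 *	NXGE_ACTUAL_RDCGRP(nxgep, 0) == 2	(hardware RDC table 2)
 *	NXGE_ACTUAL_RDC(nxgep, 0) == 8		(hardware RDC channel 8)
 */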
41 
42 /*
43  * Globals: tunable parameters (/etc/system or adb)
44  *
45  */
46 extern uint32_t nxge_rbr_size;
47 extern uint32_t nxge_rcr_size;
48 extern uint32_t	nxge_rbr_spare_size;
49 
50 extern uint32_t nxge_mblks_pending;
51 
52 /*
53  * Tunable to reduce the amount of time spent in the
54  * ISR doing Rx Processing.
55  */
56 extern uint32_t nxge_max_rx_pkts;
57 
58 /*
59  * Tunables to manage the receive buffer blocks.
60  *
61  * nxge_rx_threshold_hi: copy all buffers.
62  * nxge_rx_buf_size_type: receive buffer block size type.
63  * nxge_rx_threshold_lo: copy only up to tunable block size type.
64  */
65 extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi;
66 extern nxge_rxbuf_type_t nxge_rx_buf_size_type;
67 extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo;
68 
69 extern uint32_t	nxge_cksum_offload;
70 
71 static nxge_status_t nxge_map_rxdma(p_nxge_t, int);
72 static void nxge_unmap_rxdma(p_nxge_t, int);
73 
74 static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t);
75 
76 static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int);
77 static void nxge_rxdma_hw_stop(p_nxge_t, int);
78 
79 static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t,
80     p_nxge_dma_common_t *,  p_rx_rbr_ring_t *,
81     uint32_t,
82     p_nxge_dma_common_t *, p_rx_rcr_ring_t *,
83     p_rx_mbox_t *);
84 static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t,
85     p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
86 
87 static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t,
88     uint16_t,
89     p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
90     p_rx_rcr_ring_t *, p_rx_mbox_t *);
91 static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t,
92     p_rx_rcr_ring_t, p_rx_mbox_t);
93 
94 static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t,
95     uint16_t,
96     p_nxge_dma_common_t *,
97     p_rx_rbr_ring_t *, uint32_t);
98 static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t,
99     p_rx_rbr_ring_t);
100 
101 static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t,
102     p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
103 static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t);
104 
105 static mblk_t *
106 nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int);
107 
108 static void nxge_receive_packet(p_nxge_t,
109 	p_rx_rcr_ring_t,
110 	p_rcr_entry_t,
111 	boolean_t *,
112 	mblk_t **, mblk_t **);
113 
114 nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t);
115 
116 static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t);
117 static void nxge_freeb(p_rx_msg_t);
118 static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t);
119 
120 static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t,
121 				uint32_t, uint32_t);
122 
123 static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t,
124     p_rx_rbr_ring_t);
125 
126 
127 static nxge_status_t
128 nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t);
129 
130 nxge_status_t
131 nxge_rx_port_fatal_err_recover(p_nxge_t);
132 
133 static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t);
134 
135 nxge_status_t
136 nxge_init_rxdma_channels(p_nxge_t nxgep)
137 {
138 	nxge_grp_set_t	*set = &nxgep->rx_set;
139 	int		i, count, channel;
140 	nxge_grp_t	*group;
141 	dc_map_t	map;
142 	int		dev_gindex;
143 
144 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels"));
145 
146 	if (!isLDOMguest(nxgep)) {
147 		if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) {
148 			cmn_err(CE_NOTE, "nxge_rxdma_hw_start_common failed");
149 			return (NXGE_ERROR);
150 		}
151 	}
152 
153 	/*
154 	 * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8)
155 	 * We only have 8 hardware RDC tables, but we may have
156  * up to 16 logical (software-defined) groups of RDCs,
157 	 * if we make use of layer 3 & 4 hardware classification.
158 	 */
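	/*
	 * For example (bitmaps hypothetical): if set->lg.map is 0x5, only
	 * logical groups 0 and 2 are walked below; within each group, the
	 * RDC bitmap rdc_grps[dev_gindex].map selects which channels get
	 * bound to the group via nxge_grp_dc_add().
	 */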
159 	for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
160 		if ((1 << i) & set->lg.map) {
161 			group = set->group[i];
162 			dev_gindex =
163 			    nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i;
164 			map = nxgep->pt_config.rdc_grps[dev_gindex].map;
165 			for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
166 				if ((1 << channel) & map) {
167 					if ((nxge_grp_dc_add(nxgep,
168 					    group, VP_BOUND_RX, channel)))
169 						goto init_rxdma_channels_exit;
170 				}
171 			}
172 		}
173 		if (++count == set->lg.count)
174 			break;
175 	}
176 
177 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
178 	return (NXGE_OK);
179 
180 init_rxdma_channels_exit:
181 	for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
182 		if ((1 << i) & set->lg.map) {
183 			group = set->group[i];
184 			dev_gindex =
185 			    nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i;
186 			map = nxgep->pt_config.rdc_grps[dev_gindex].map;
187 			for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
188 				if ((1 << channel) & map) {
189 					nxge_grp_dc_remove(nxgep,
190 					    VP_BOUND_RX, channel);
191 				}
192 			}
193 		}
194 		if (++count == set->lg.count)
195 			break;
196 	}
197 
198 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
199 	return (NXGE_ERROR);
200 }
201 
202 nxge_status_t
203 nxge_init_rxdma_channel(p_nxge_t nxge, int channel)
204 {
205 	nxge_status_t	status;
206 
207 	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel"));
208 
209 	status = nxge_map_rxdma(nxge, channel);
210 	if (status != NXGE_OK) {
211 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
212 		    "<== nxge_init_rxdma_channel: status 0x%x", status));
213 		return (status);
214 	}
215 
216 #if defined(sun4v)
217 	if (isLDOMguest(nxge)) {
218 		/* set rcr_ring */
219 		p_rx_rcr_ring_t ring = nxge->rx_rcr_rings->rcr_rings[channel];
220 
221 		status = nxge_hio_rxdma_bind_intr(nxge, ring, channel);
222 		if (status != NXGE_OK) {
223 			nxge_unmap_rxdma(nxge, channel);
224 			return (status);
225 		}
226 	}
227 #endif
228 
229 	status = nxge_rxdma_hw_start(nxge, channel);
230 	if (status != NXGE_OK) {
231 		nxge_unmap_rxdma(nxge, channel);
232 	}
233 
234 	if (!nxge->statsp->rdc_ksp[channel])
235 		nxge_setup_rdc_kstats(nxge, channel);
236 
237 	NXGE_DEBUG_MSG((nxge, MEM2_CTL,
238 	    "<== nxge_init_rxdma_channel: status 0x%x", status));
239 
240 	return (status);
241 }
242 
243 void
244 nxge_uninit_rxdma_channels(p_nxge_t nxgep)
245 {
246 	nxge_grp_set_t *set = &nxgep->rx_set;
247 	int rdc;
248 
249 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels"));
250 
251 	if (set->owned.map == 0) {
252 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
253 		    "nxge_uninit_rxdma_channels: no channels"));
254 		return;
255 	}
256 
257 	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
258 		if ((1 << rdc) & set->owned.map) {
259 			nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc);
260 		}
261 	}
262 
263 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels"));
264 }
265 
266 void
267 nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel)
268 {
269 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel"));
270 
271 	if (nxgep->statsp->rdc_ksp[channel]) {
272 		kstat_delete(nxgep->statsp->rdc_ksp[channel]);
273 		nxgep->statsp->rdc_ksp[channel] = 0;
274 	}
275 
276 	nxge_rxdma_hw_stop(nxgep, channel);
277 	nxge_unmap_rxdma(nxgep, channel);
278 
279 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channel"));
280 }
281 
282 nxge_status_t
283 nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
284 {
285 	npi_handle_t		handle;
286 	npi_status_t		rs = NPI_SUCCESS;
287 	nxge_status_t		status = NXGE_OK;
288 
289 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel"));
290 
291 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
292 	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
293 
294 	if (rs != NPI_SUCCESS) {
295 		status = NXGE_ERROR | rs;
296 	}
297 
298 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel"));
299 
300 	return (status);
301 }
302 
303 void
304 nxge_rxdma_regs_dump_channels(p_nxge_t nxgep)
305 {
306 	nxge_grp_set_t *set = &nxgep->rx_set;
307 	int rdc;
308 
309 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels"));
310 
311 	if (!isLDOMguest(nxgep)) {
312 		npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep);
313 		(void) npi_rxdma_dump_fzc_regs(handle);
314 	}
315 
316 	if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
317 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
318 		    "nxge_rxdma_regs_dump_channels: "
319 		    "NULL ring pointer(s)"));
320 		return;
321 	}
322 
323 	if (set->owned.map == 0) {
324 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
325 		    "nxge_rxdma_regs_dump_channels: no channels"));
326 		return;
327 	}
328 
329 	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
330 		if ((1 << rdc) & set->owned.map) {
331 			rx_rbr_ring_t *ring =
332 			    nxgep->rx_rbr_rings->rbr_rings[rdc];
333 			if (ring) {
334 				(void) nxge_dump_rxdma_channel(nxgep, rdc);
335 			}
336 		}
337 	}
338 
339 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump_channels"));
340 }
341 
342 nxge_status_t
343 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel)
344 {
345 	npi_handle_t		handle;
346 	npi_status_t		rs = NPI_SUCCESS;
347 	nxge_status_t		status = NXGE_OK;
348 
349 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel"));
350 
351 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
352 	rs = npi_rxdma_dump_rdc_regs(handle, channel);
353 
354 	if (rs != NPI_SUCCESS) {
355 		status = NXGE_ERROR | rs;
356 	}
357 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel"));
358 	return (status);
359 }
360 
361 nxge_status_t
362 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
363     p_rx_dma_ent_msk_t mask_p)
364 {
365 	npi_handle_t		handle;
366 	npi_status_t		rs = NPI_SUCCESS;
367 	nxge_status_t		status = NXGE_OK;
368 
369 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
370 	    "==> nxge_init_rxdma_channel_event_mask"));
371 
372 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
373 	rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p);
374 	if (rs != NPI_SUCCESS) {
375 		status = NXGE_ERROR | rs;
376 	}
377 
378 	return (status);
379 }
380 
381 nxge_status_t
382 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
383     p_rx_dma_ctl_stat_t cs_p)
384 {
385 	npi_handle_t		handle;
386 	npi_status_t		rs = NPI_SUCCESS;
387 	nxge_status_t		status = NXGE_OK;
388 
389 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
390 	    "==> nxge_init_rxdma_channel_cntl_stat"));
391 
392 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
393 	rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p);
394 
395 	if (rs != NPI_SUCCESS) {
396 		status = NXGE_ERROR | rs;
397 	}
398 
399 	return (status);
400 }
401 
402 /*
403  * nxge_rxdma_cfg_rdcgrp_default_rdc
404  *
405  *	Set the default RDC for an RDC Group (Table)
406  *
407  * Arguments:
408  * 	nxgep
409  *	rdcgrp	The group to modify
410  *	rdc	The new default RDC.
411  *
412  * Notes:
413  *
414  * NPI/NXGE function calls:
415  *	npi_rxdma_cfg_rdc_table_default_rdc()
416  *
417  * Registers accessed:
418  *	RDC_TBL_REG: FZC_ZCP + 0x10000
419  *
420  * Context:
421  *	Service domain
422  */
423 nxge_status_t
424 nxge_rxdma_cfg_rdcgrp_default_rdc(
425 	p_nxge_t nxgep,
426 	uint8_t rdcgrp,
427 	uint8_t rdc)
428 {
429 	npi_handle_t		handle;
430 	npi_status_t		rs = NPI_SUCCESS;
431 	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
432 	p_nxge_rdc_grp_t	rdc_grp_p;
433 	uint8_t actual_rdcgrp, actual_rdc;
434 
435 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
436 	    " ==> nxge_rxdma_cfg_rdcgrp_default_rdc"));
437 	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
438 
439 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
440 
441 	/*
442 	 * This has to be rewritten.  Do we even allow this anymore?
443 	 */
444 	rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp];
445 	RDC_MAP_IN(rdc_grp_p->map, rdc);
446 	rdc_grp_p->def_rdc = rdc;
447 
448 	actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp);
449 	actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc);
450 
451 	rs = npi_rxdma_cfg_rdc_table_default_rdc(
452 	    handle, actual_rdcgrp, actual_rdc);
453 
454 	if (rs != NPI_SUCCESS) {
455 		return (NXGE_ERROR | rs);
456 	}
457 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
458 	    " <== nxge_rxdma_cfg_rdcgrp_default_rdc"));
459 	return (NXGE_OK);
460 }
461 
462 nxge_status_t
463 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc)
464 {
465 	npi_handle_t		handle;
466 
467 	uint8_t actual_rdc;
468 	npi_status_t		rs = NPI_SUCCESS;
469 
470 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
471 	    " ==> nxge_rxdma_cfg_port_default_rdc"));
472 
473 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
474 	actual_rdc = rdc;	/* XXX Hack! */
475 	rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc);
476 
477 
478 	if (rs != NPI_SUCCESS) {
479 		return (NXGE_ERROR | rs);
480 	}
481 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
482 	    " <== nxge_rxdma_cfg_port_default_rdc"));
483 
484 	return (NXGE_OK);
485 }
486 
487 nxge_status_t
488 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel,
489 				    uint16_t pkts)
490 {
491 	npi_status_t	rs = NPI_SUCCESS;
492 	npi_handle_t	handle;
493 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
494 	    " ==> nxge_rxdma_cfg_rcr_threshold"));
495 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
496 
497 	rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts);
498 
499 	if (rs != NPI_SUCCESS) {
500 		return (NXGE_ERROR | rs);
501 	}
502 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold"));
503 	return (NXGE_OK);
504 }
505 
506 nxge_status_t
507 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel,
508 			    uint16_t tout, uint8_t enable)
509 {
510 	npi_status_t	rs = NPI_SUCCESS;
511 	npi_handle_t	handle;
512 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout"));
513 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
514 	if (enable == 0) {
515 		rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel);
516 	} else {
517 		rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
518 		    tout);
519 	}
520 
521 	if (rs != NPI_SUCCESS) {
522 		return (NXGE_ERROR | rs);
523 	}
524 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout"));
525 	return (NXGE_OK);
526 }
527 
528 nxge_status_t
529 nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
530     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
531 {
532 	npi_handle_t		handle;
533 	rdc_desc_cfg_t 		rdc_desc;
534 	p_rcrcfig_b_t		cfgb_p;
535 	npi_status_t		rs = NPI_SUCCESS;
536 
537 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel"));
538 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
539 	/*
540 	 * Use configuration data composed at init time.
541 	 * Write to hardware the receive ring configurations.
542 	 */
543 	rdc_desc.mbox_enable = 1;
544 	rdc_desc.mbox_addr = mbox_p->mbox_addr;
545 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
546 	    "==> nxge_enable_rxdma_channel: mboxp $%p($%p)",
547 	    mbox_p->mbox_addr, rdc_desc.mbox_addr));
548 
549 	rdc_desc.rbr_len = rbr_p->rbb_max;
550 	rdc_desc.rbr_addr = rbr_p->rbr_addr;
551 
552 	switch (nxgep->rx_bksize_code) {
553 	case RBR_BKSIZE_4K:
554 		rdc_desc.page_size = SIZE_4KB;
555 		break;
556 	case RBR_BKSIZE_8K:
557 		rdc_desc.page_size = SIZE_8KB;
558 		break;
559 	case RBR_BKSIZE_16K:
560 		rdc_desc.page_size = SIZE_16KB;
561 		break;
562 	case RBR_BKSIZE_32K:
563 		rdc_desc.page_size = SIZE_32KB;
564 		break;
565 	}
566 
567 	rdc_desc.size0 = rbr_p->npi_pkt_buf_size0;
568 	rdc_desc.valid0 = 1;
569 
570 	rdc_desc.size1 = rbr_p->npi_pkt_buf_size1;
571 	rdc_desc.valid1 = 1;
572 
573 	rdc_desc.size2 = rbr_p->npi_pkt_buf_size2;
574 	rdc_desc.valid2 = 1;
575 
576 	rdc_desc.full_hdr = rcr_p->full_hdr_flag;
577 	rdc_desc.offset = rcr_p->sw_priv_hdr_len;
578 
579 	rdc_desc.rcr_len = rcr_p->comp_size;
580 	rdc_desc.rcr_addr = rcr_p->rcr_addr;
581 
582 	cfgb_p = &(rcr_p->rcr_cfgb);
583 	rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres;
584 	/* For now, disable this timeout in a guest domain. */
585 	if (isLDOMguest(nxgep)) {
586 		rdc_desc.rcr_timeout = 0;
587 		rdc_desc.rcr_timeout_enable = 0;
588 	} else {
589 		rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout;
590 		rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout;
591 	}
592 
593 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
594 	    "rbr_len qlen %d pagesize code %d rcr_len %d",
595 	    rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len));
596 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
597 	    "size 0 %d size 1 %d size 2 %d",
598 	    rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1,
599 	    rbr_p->npi_pkt_buf_size2));
600 
601 	rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc);
602 	if (rs != NPI_SUCCESS) {
603 		return (NXGE_ERROR | rs);
604 	}
605 
606 	/*
607 	 * Enable the timeout and threshold.
608 	 */
609 	rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel,
610 	    rdc_desc.rcr_threshold);
611 	if (rs != NPI_SUCCESS) {
612 		return (NXGE_ERROR | rs);
613 	}
614 
615 	rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
616 	    rdc_desc.rcr_timeout);
617 	if (rs != NPI_SUCCESS) {
618 		return (NXGE_ERROR | rs);
619 	}
620 
621 	if (!isLDOMguest(nxgep)) {
622 		/* Enable the DMA */
623 		rs = npi_rxdma_cfg_rdc_enable(handle, channel);
624 		if (rs != NPI_SUCCESS) {
625 			return (NXGE_ERROR | rs);
626 		}
627 	}
628 
629 	/* Kick the DMA engine. */
630 	npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max);
631 
632 	if (!isLDOMguest(nxgep)) {
633 		/* Clear the rbr empty bit */
634 		(void) npi_rxdma_channel_rbr_empty_clear(handle, channel);
635 	}
636 
637 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel"));
638 
639 	return (NXGE_OK);
640 }
641 
642 nxge_status_t
643 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
644 {
645 	npi_handle_t		handle;
646 	npi_status_t		rs = NPI_SUCCESS;
647 
648 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel"));
649 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
650 
651 	/* disable the DMA */
652 	rs = npi_rxdma_cfg_rdc_disable(handle, channel);
653 	if (rs != NPI_SUCCESS) {
654 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
655 		    "<== nxge_disable_rxdma_channel:failed (0x%x)",
656 		    rs));
657 		return (NXGE_ERROR | rs);
658 	}
659 
660 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel"));
661 	return (NXGE_OK);
662 }
663 
664 nxge_status_t
665 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel)
666 {
667 	npi_handle_t		handle;
668 	nxge_status_t		status = NXGE_OK;
669 
670 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
671 	    "==> nxge_rxdma_channel_rcrflush"));
672 
673 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
674 	npi_rxdma_rdc_rcr_flush(handle, channel);
675 
676 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
677 	    "<== nxge_rxdma_channel_rcrflush"));
678 	return (status);
679 
680 }
681 
682 #define	MID_INDEX(l, r) ((r + l + 1) >> 1)
683 
684 #define	TO_LEFT -1
685 #define	TO_RIGHT 1
686 #define	BOTH_RIGHT (TO_RIGHT + TO_RIGHT)
687 #define	BOTH_LEFT (TO_LEFT + TO_LEFT)
688 #define	IN_MIDDLE (TO_RIGHT + TO_LEFT)
689 #define	NO_HINT 0xffffffff
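/*
 * The binary search in nxge_rxbuf_pp_to_vp() below classifies a packet
 * buffer address against the chunk at anchor_index by summing the two
 * side indicators:
 *
 *	IN_MIDDLE	the address falls inside the chunk
 *	BOTH_RIGHT	the address lies above the chunk (search right)
 *	BOTH_LEFT	the address lies below the chunk (search left)
 */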
690 
691 /*ARGSUSED*/
692 nxge_status_t
693 nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p,
694 	uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp,
695 	uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index)
696 {
697 	int			bufsize;
698 	uint64_t		pktbuf_pp;
699 	uint64_t 		dvma_addr;
700 	rxring_info_t 		*ring_info;
701 	int 			base_side, end_side;
702 	int 			r_index, l_index, anchor_index;
703 	int 			found, search_done;
704 	uint32_t offset, chunk_size, block_size, page_size_mask;
705 	uint32_t chunk_index, block_index, total_index;
706 	int 			max_iterations, iteration;
707 	rxbuf_index_info_t 	*bufinfo;
708 
709 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp"));
710 
711 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
712 	    "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
713 	    pkt_buf_addr_pp,
714 	    pktbufsz_type));
715 #if defined(__i386)
716 	pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp;
717 #else
718 	pktbuf_pp = (uint64_t)pkt_buf_addr_pp;
719 #endif
720 
721 	switch (pktbufsz_type) {
722 	case 0:
723 		bufsize = rbr_p->pkt_buf_size0;
724 		break;
725 	case 1:
726 		bufsize = rbr_p->pkt_buf_size1;
727 		break;
728 	case 2:
729 		bufsize = rbr_p->pkt_buf_size2;
730 		break;
731 	case RCR_SINGLE_BLOCK:
732 		bufsize = 0;
733 		anchor_index = 0;
734 		break;
735 	default:
736 		return (NXGE_ERROR);
737 	}
738 
739 	if (rbr_p->num_blocks == 1) {
740 		anchor_index = 0;
741 		ring_info = rbr_p->ring_info;
742 		bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
743 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
744 		    "==> nxge_rxbuf_pp_to_vp: (found, 1 block) "
745 		    "buf_pp $%p btype %d anchor_index %d "
746 		    "bufinfo $%p",
747 		    pkt_buf_addr_pp,
748 		    pktbufsz_type,
749 		    anchor_index,
750 		    bufinfo));
751 
752 		goto found_index;
753 	}
754 
755 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
756 	    "==> nxge_rxbuf_pp_to_vp: "
757 	    "buf_pp $%p btype %d  anchor_index %d",
758 	    pkt_buf_addr_pp,
759 	    pktbufsz_type,
760 	    anchor_index));
761 
762 	ring_info = rbr_p->ring_info;
763 	found = B_FALSE;
764 	bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
765 	iteration = 0;
766 	max_iterations = ring_info->max_iterations;
767 		/*
768 		 * First check if this block has been seen
769 		 * recently. This is indicated by a hint which
770 		 * is initialized when the first buffer of the block
771 		 * is seen. The hint is reset when the last buffer of
772 		 * the block has been processed.
773 		 * As three block sizes are supported, three hints
774 		 * are kept. The idea behind the hints is that once
775 		 * the hardware  uses a block for a buffer  of that
776 		 * size, it will use it exclusively for that size
777 		 * and will use it until it is exhausted. It is assumed
778 		 * that there would be a single block being used for the same
779 		 * buffer sizes at any given time.
780 		 */
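	/*
	 * Worked example (addresses are hypothetical): if the hinted chunk
	 * starts at dvma_addr 0x100000 with chunk_size 0x10000 and bufsize
	 * is 0x2000, a packet at 0x10e000 hits the hint; because
	 * 0x10e000 + 0x2000 reaches the end of the chunk, the hint for this
	 * buffer size is cleared and the next packet of this size falls
	 * back to the binary search below.
	 */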
781 	if (ring_info->hint[pktbufsz_type] != NO_HINT) {
782 		anchor_index = ring_info->hint[pktbufsz_type];
783 		dvma_addr =  bufinfo[anchor_index].dvma_addr;
784 		chunk_size = bufinfo[anchor_index].buf_size;
785 		if ((pktbuf_pp >= dvma_addr) &&
786 		    (pktbuf_pp < (dvma_addr + chunk_size))) {
787 			found = B_TRUE;
788 				/*
789 				 * Check if this is the last buffer in the block.
790 				 * If so, reset the hint for this size.
791 				 */
792 
793 			if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
794 				ring_info->hint[pktbufsz_type] = NO_HINT;
795 		}
796 	}
797 
798 	if (found == B_FALSE) {
799 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
800 		    "==> nxge_rxbuf_pp_to_vp: (!found)"
801 		    "buf_pp $%p btype %d anchor_index %d",
802 		    pkt_buf_addr_pp,
803 		    pktbufsz_type,
804 		    anchor_index));
805 
806 			/*
807 			 * This is the first buffer of the block of this
808 			 * size. Need to search the whole information
809 			 * array.
810 			 * The search uses a binary search algorithm,
811 			 * which assumes that the information array is
812 			 * already sorted in increasing order:
813 			 * info[0] < info[1] < info[2]  .... < info[n-1]
814 			 * where n is the size of the information array
815 			 */
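		/*
		 * Worked example (hypothetical): with num_blocks == 8 the
		 * search starts at l_index 0, r_index 7 and anchor_index
		 * MID_INDEX(7, 0) == 4; each pass compares the packet
		 * address against bufinfo[anchor_index] and halves the
		 * interval, so at most max_iterations passes are needed.
		 */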
816 		r_index = rbr_p->num_blocks - 1;
817 		l_index = 0;
818 		search_done = B_FALSE;
819 		anchor_index = MID_INDEX(r_index, l_index);
820 		while (search_done == B_FALSE) {
821 			if ((r_index == l_index) ||
822 			    (iteration >= max_iterations))
823 				search_done = B_TRUE;
824 			end_side = TO_RIGHT; /* to the right */
825 			base_side = TO_LEFT; /* to the left */
826 			/* read the DVMA address information and sort it */
827 			dvma_addr =  bufinfo[anchor_index].dvma_addr;
828 			chunk_size = bufinfo[anchor_index].buf_size;
829 			NXGE_DEBUG_MSG((nxgep, RX2_CTL,
830 			    "==> nxge_rxbuf_pp_to_vp: (searching)"
831 			    "buf_pp $%p btype %d "
832 			    "anchor_index %d chunk_size %d dvmaaddr $%p",
833 			    pkt_buf_addr_pp,
834 			    pktbufsz_type,
835 			    anchor_index,
836 			    chunk_size,
837 			    dvma_addr));
838 
839 			if (pktbuf_pp >= dvma_addr)
840 				base_side = TO_RIGHT; /* to the right */
841 			if (pktbuf_pp < (dvma_addr + chunk_size))
842 				end_side = TO_LEFT; /* to the left */
843 
844 			switch (base_side + end_side) {
845 			case IN_MIDDLE:
846 				/* found */
847 				found = B_TRUE;
848 				search_done = B_TRUE;
849 				if ((pktbuf_pp + bufsize) <
850 				    (dvma_addr + chunk_size))
851 					ring_info->hint[pktbufsz_type] =
852 					    bufinfo[anchor_index].buf_index;
853 				break;
854 			case BOTH_RIGHT:
855 				/* not found: go to the right */
856 				l_index = anchor_index + 1;
857 				anchor_index = MID_INDEX(r_index, l_index);
858 				break;
859 
860 			case BOTH_LEFT:
861 				/* not found: go to the left */
862 				r_index = anchor_index - 1;
863 				anchor_index = MID_INDEX(r_index, l_index);
864 				break;
865 			default: /* should not come here */
866 				return (NXGE_ERROR);
867 			}
868 			iteration++;
869 		}
870 
871 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
872 		    "==> nxge_rxbuf_pp_to_vp: (search done)"
873 		    "buf_pp $%p btype %d anchor_index %d",
874 		    pkt_buf_addr_pp,
875 		    pktbufsz_type,
876 		    anchor_index));
877 	}
878 
879 	if (found == B_FALSE) {
880 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
881 		    "==> nxge_rxbuf_pp_to_vp: (search failed)"
882 		    "buf_pp $%p btype %d anchor_index %d",
883 		    pkt_buf_addr_pp,
884 		    pktbufsz_type,
885 		    anchor_index));
886 		return (NXGE_ERROR);
887 	}
888 
889 found_index:
890 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
891 	    "==> nxge_rxbuf_pp_to_vp: (FOUND1)"
892 	    "buf_pp $%p btype %d bufsize %d anchor_index %d",
893 	    pkt_buf_addr_pp,
894 	    pktbufsz_type,
895 	    bufsize,
896 	    anchor_index));
897 
898 	/* index of the first block in this chunk */
899 	chunk_index = bufinfo[anchor_index].start_index;
900 	dvma_addr =  bufinfo[anchor_index].dvma_addr;
901 	page_size_mask = ring_info->block_size_mask;
902 
903 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
904 	    "==> nxge_rxbuf_pp_to_vp: (FOUND3, get chunk) "
905 	    "buf_pp $%p btype %d bufsize %d "
906 	    "anchor_index %d chunk_index %d dvma $%p",
907 	    pkt_buf_addr_pp,
908 	    pktbufsz_type,
909 	    bufsize,
910 	    anchor_index,
911 	    chunk_index,
912 	    dvma_addr));
913 
914 	offset = pktbuf_pp - dvma_addr; /* offset within the chunk */
915 	block_size = rbr_p->block_size; /* System  block(page) size */
916 
917 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
918 	    "==> nxge_rxbuf_pp_to_vp: (FOUND4, get chunk) "
919 	    "buf_pp $%p btype %d bufsize %d "
920 	    "anchor_index %d chunk_index %d dvma $%p "
921 	    "offset %d block_size %d",
922 	    pkt_buf_addr_pp,
923 	    pktbufsz_type,
924 	    bufsize,
925 	    anchor_index,
926 	    chunk_index,
927 	    dvma_addr,
928 	    offset,
929 	    block_size));
930 
931 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index"));
932 
933 	block_index = (offset / block_size); /* index within chunk */
934 	total_index = chunk_index + block_index;
935 
936 
937 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
938 	    "==> nxge_rxbuf_pp_to_vp: "
939 	    "total_index %d dvma_addr $%p "
940 	    "offset %d block_size %d "
941 	    "block_index %d ",
942 	    total_index, dvma_addr,
943 	    offset, block_size,
944 	    block_index));
945 #if defined(__i386)
946 	*pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr +
947 	    (uint32_t)offset);
948 #else
949 	*pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr +
950 	    (uint64_t)offset);
951 #endif
952 
953 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
954 	    "==> nxge_rxbuf_pp_to_vp: "
955 	    "total_index %d dvma_addr $%p "
956 	    "offset %d block_size %d "
957 	    "block_index %d "
958 	    "*pkt_buf_addr_p $%p",
959 	    total_index, dvma_addr,
960 	    offset, block_size,
961 	    block_index,
962 	    *pkt_buf_addr_p));
963 
964 
965 	*msg_index = total_index;
966 	*bufoffset =  (offset & page_size_mask);
967 
968 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
969 	    "==> nxge_rxbuf_pp_to_vp: get msg index: "
970 	    "msg_index %d bufoffset_index %d",
971 	    *msg_index,
972 	    *bufoffset));
973 
974 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp"));
975 
976 	return (NXGE_OK);
977 }
978 
979 /*
980  * Comparison function used by nxge_ksort()
981  * to order buffers by DVMA address.
982  */
983 static int
984 nxge_sort_compare(const void *p1, const void *p2)
985 {
986 
987 	rxbuf_index_info_t *a, *b;
988 
989 	a = (rxbuf_index_info_t *)p1;
990 	b = (rxbuf_index_info_t *)p2;
991 
992 	if (a->dvma_addr > b->dvma_addr)
993 		return (1);
994 	if (a->dvma_addr < b->dvma_addr)
995 		return (-1);
996 	return (0);
997 }
998 
999 
1000 
1001 /*
1002  * grabbed this sort implementation from common/syscall/avl.c
1003  *
1004  */
1005 /*
1006  * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified.
1007  * v = Ptr to array/vector of objs
1008  * n = # objs in the array
1009  * s = size of each obj (must be a multiple of the word size)
1010  * f = ptr to function to compare two objs
1011  *	returns (-1 = less than, 0 = equal, 1 = greater than)
1012  */
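/*
 * Illustration only: with n == 5 the gap sequence is 2, 1; each inner
 * loop compares elements g positions apart and, when the comparison
 * function returns 1, swaps them one 32-bit word at a time (hence the
 * alignment and size ASSERTs below).
 */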
1013 void
1014 nxge_ksort(caddr_t v, int n, int s, int (*f)())
1015 {
1016 	int g, i, j, ii;
1017 	unsigned int *p1, *p2;
1018 	unsigned int tmp;
1019 
1020 	/* No work to do */
1021 	if (v == NULL || n <= 1)
1022 		return;
1023 	/* Sanity check on arguments */
1024 	ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0);
1025 	ASSERT(s > 0);
1026 
1027 	for (g = n / 2; g > 0; g /= 2) {
1028 		for (i = g; i < n; i++) {
1029 			for (j = i - g; j >= 0 &&
1030 			    (*f)(v + j * s, v + (j + g) * s) == 1;
1031 			    j -= g) {
1032 				p1 = (unsigned *)(v + j * s);
1033 				p2 = (unsigned *)(v + (j + g) * s);
1034 				for (ii = 0; ii < s / 4; ii++) {
1035 					tmp = *p1;
1036 					*p1++ = *p2;
1037 					*p2++ = tmp;
1038 				}
1039 			}
1040 		}
1041 	}
1042 }
1043 
1044 /*
1045  * Initialize data structures required for rxdma
1046  * buffer dvma->vmem address lookup
1047  */
1048 /*ARGSUSED*/
1049 static nxge_status_t
1050 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp)
1051 {
1052 
1053 	int index;
1054 	rxring_info_t *ring_info;
1055 	int max_iteration = 0, max_index = 0;
1056 
1057 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init"));
1058 
1059 	ring_info = rbrp->ring_info;
1060 	ring_info->hint[0] = NO_HINT;
1061 	ring_info->hint[1] = NO_HINT;
1062 	ring_info->hint[2] = NO_HINT;
1063 	max_index = rbrp->num_blocks;
1064 
1065 		/* read the DVMA address information and sort it */
1066 		/* do init of the information array */
1067 
1068 
1069 	NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
1070 	    " nxge_rxbuf_index_info_init Sort ptrs"));
1071 
1072 		/* sort the array */
1073 	nxge_ksort((void *)ring_info->buffer, max_index,
1074 	    sizeof (rxbuf_index_info_t), nxge_sort_compare);
1075 
1076 
1077 
1078 	for (index = 0; index < max_index; index++) {
1079 		NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
1080 		    " nxge_rxbuf_index_info_init: sorted chunk %d "
1081 		    " ioaddr $%p kaddr $%p size %x",
1082 		    index, ring_info->buffer[index].dvma_addr,
1083 		    ring_info->buffer[index].kaddr,
1084 		    ring_info->buffer[index].buf_size));
1085 	}
1086 
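	/*
	 * max_iterations bounds the binary search in nxge_rxbuf_pp_to_vp():
	 * it is the smallest m such that max_index < (1 << m), plus one
	 * pass of slack.  For example, 8 chunks yield max_iterations == 5.
	 */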
1087 	max_iteration = 0;
1088 	while (max_index >= (1ULL << max_iteration))
1089 		max_iteration++;
1090 	ring_info->max_iterations = max_iteration + 1;
1091 	NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
1092 	    " nxge_rxbuf_index_info_init Find max iter %d",
1093 	    ring_info->max_iterations));
1094 
1095 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init"));
1096 	return (NXGE_OK);
1097 }
1098 
1099 /* ARGSUSED */
1100 void
1101 nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p)
1102 {
1103 #ifdef	NXGE_DEBUG
1104 
1105 	uint32_t bptr;
1106 	uint64_t pp;
1107 
1108 	bptr = entry_p->bits.hdw.pkt_buf_addr;
1109 
1110 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1111 	    "\trcr entry $%p "
1112 	    "\trcr entry 0x%0llx "
1113 	    "\trcr entry 0x%08x "
1114 	    "\trcr entry 0x%08x "
1115 	    "\tvalue 0x%0llx\n"
1116 	    "\tmulti = %d\n"
1117 	    "\tpkt_type = 0x%x\n"
1118 	    "\tzero_copy = %d\n"
1119 	    "\tnoport = %d\n"
1120 	    "\tpromis = %d\n"
1121 	    "\terror = 0x%04x\n"
1122 	    "\tdcf_err = 0x%01x\n"
1123 	    "\tl2_len = %d\n"
1124 	    "\tpktbufsize = %d\n"
1125 	    "\tpkt_buf_addr = $%p\n"
1126 	    "\tpkt_buf_addr (<< 6) = $%p\n",
1127 	    entry_p,
1128 	    *(int64_t *)entry_p,
1129 	    *(int32_t *)entry_p,
1130 	    *(int32_t *)((char *)entry_p + 4),
1131 	    entry_p->value,
1132 	    entry_p->bits.hdw.multi,
1133 	    entry_p->bits.hdw.pkt_type,
1134 	    entry_p->bits.hdw.zero_copy,
1135 	    entry_p->bits.hdw.noport,
1136 	    entry_p->bits.hdw.promis,
1137 	    entry_p->bits.hdw.error,
1138 	    entry_p->bits.hdw.dcf_err,
1139 	    entry_p->bits.hdw.l2_len,
1140 	    entry_p->bits.hdw.pktbufsz,
1141 	    bptr,
1142 	    entry_p->bits.ldw.pkt_buf_addr));
1143 
1144 	pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) <<
1145 	    RCR_PKT_BUF_ADDR_SHIFT;
1146 
1147 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d",
1148 	    pp, (*(int64_t *)entry_p >> 40) & 0x3fff));
1149 #endif
1150 }
1151 
1152 void
1153 nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc)
1154 {
1155 	npi_handle_t		handle;
1156 	rbr_stat_t 		rbr_stat;
1157 	addr44_t 		hd_addr;
1158 	addr44_t 		tail_addr;
1159 	uint16_t 		qlen;
1160 
1161 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1162 	    "==> nxge_rxdma_regs_dump: rdc channel %d", rdc));
1163 
1164 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1165 
1166 	/* RBR head */
1167 	hd_addr.addr = 0;
1168 	(void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr);
1169 #if defined(__i386)
1170 	printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
1171 	    (void *)(uint32_t)hd_addr.addr);
1172 #else
1173 	printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
1174 	    (void *)hd_addr.addr);
1175 #endif
1176 
1177 	/* RBR stats */
1178 	(void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat);
1179 	printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen);
1180 
1181 	/* RCR tail */
1182 	tail_addr.addr = 0;
1183 	(void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr);
1184 #if defined(__i386)
1185 	printf("nxge_rxdma_regs_dump: got tail ptr $%p \n",
1186 	    (void *)(uint32_t)tail_addr.addr);
1187 #else
1188 	printf("nxge_rxdma_regs_dump: got tail ptr $%p \n",
1189 	    (void *)tail_addr.addr);
1190 #endif
1191 
1192 	/* RCR qlen */
1193 	(void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen);
1194 	printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen);
1195 
1196 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1197 	    "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc));
1198 }
1199 
1200 nxge_status_t
1201 nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
1202 {
1203 	nxge_grp_set_t *set = &nxgep->rx_set;
1204 	nxge_status_t status;
1205 	npi_status_t rs = NPI_SUCCESS;
1206 	int rdc;
1207 
1208 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1209 	    "==> nxge_rxdma_hw_mode: mode %d", enable));
1210 
1211 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1212 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
1213 		    "<== nxge_rxdma_hw_mode: not initialized"));
1214 		return (NXGE_ERROR);
1215 	}
1216 
1217 	if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
1218 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
1219 		    "<== nxge_rxdma_hw_mode: "
1220 		    "NULL ring pointer(s)"));
1221 		return (NXGE_ERROR);
1222 	}
1223 
1224 	if (set->owned.map == 0) {
1225 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
1226 		    "nxge_rxdma_hw_mode: no channels"));
1227 		return (NXGE_ERROR);
1228 	}
1229 
1230 	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1231 		if ((1 << rdc) & set->owned.map) {
1232 			rx_rbr_ring_t *ring =
1233 			    nxgep->rx_rbr_rings->rbr_rings[rdc];
1234 			npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep);
1235 			if (ring) {
1236 				if (enable) {
1237 					NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1238 					    "==> nxge_rxdma_hw_mode: "
1239 					    "channel %d (enable)", rdc));
1240 					rs = npi_rxdma_cfg_rdc_enable
1241 					    (handle, rdc);
1242 				} else {
1243 					NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1244 					    "==> nxge_rxdma_hw_mode: "
1245 					    "channel %d (disable)", rdc));
1246 					rs = npi_rxdma_cfg_rdc_disable
1247 					    (handle, rdc);
1248 				}
1249 			}
1250 		}
1251 	}
1252 
1253 	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1254 
1255 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1256 	    "<== nxge_rxdma_hw_mode: status 0x%x", status));
1257 
1258 	return (status);
1259 }
1260 
1261 void
1262 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
1263 {
1264 	npi_handle_t		handle;
1265 
1266 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1267 	    "==> nxge_rxdma_enable_channel: channel %d", channel));
1268 
1269 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1270 	(void) npi_rxdma_cfg_rdc_enable(handle, channel);
1271 
1272 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel"));
1273 }
1274 
1275 void
1276 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
1277 {
1278 	npi_handle_t		handle;
1279 
1280 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1281 	    "==> nxge_rxdma_disable_channel: channel %d", channel));
1282 
1283 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1284 	(void) npi_rxdma_cfg_rdc_disable(handle, channel);
1285 
1286 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel"));
1287 }
1288 
1289 void
1290 nxge_hw_start_rx(p_nxge_t nxgep)
1291 {
1292 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx"));
1293 
1294 	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
1295 	(void) nxge_rx_mac_enable(nxgep);
1296 
1297 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx"));
1298 }
1299 
1300 /*ARGSUSED*/
1301 void
1302 nxge_fixup_rxdma_rings(p_nxge_t nxgep)
1303 {
1304 	nxge_grp_set_t *set = &nxgep->rx_set;
1305 	int rdc;
1306 
1307 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings"));
1308 
1309 	if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
1310 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
1311 		    "<== nxge_fixup_rxdma_rings: "
1312 		    "NULL ring pointer(s)"));
1313 		return;
1314 	}
1315 
1316 	if (set->owned.map == 0) {
1317 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
1318 		    "nxge_fixup_rxdma_rings: no channels"));
1319 		return;
1320 	}
1321 
1322 	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1323 		if ((1 << rdc) & set->owned.map) {
1324 			rx_rbr_ring_t *ring =
1325 			    nxgep->rx_rbr_rings->rbr_rings[rdc];
1326 			if (ring) {
1327 				nxge_rxdma_hw_stop(nxgep, rdc);
1328 				NXGE_DEBUG_MSG((nxgep, RX_CTL,
1329 				    "==> nxge_fixup_rxdma_rings: "
1330 				    "channel %d ring $%p",
1331 				    rdc, ring));
1332 				(void) nxge_rxdma_fix_channel(nxgep, rdc);
1333 			}
1334 		}
1335 	}
1336 
1337 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings"));
1338 }
1339 
1340 void
1341 nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
1342 {
1343 	int			ndmas;
1344 	p_rx_rbr_rings_t 	rx_rbr_rings;
1345 	p_rx_rbr_ring_t		*rbr_rings;
1346 	p_rx_rcr_rings_t 	rx_rcr_rings;
1347 	p_rx_rcr_ring_t		*rcr_rings;
1348 	p_rx_mbox_areas_t 	rx_mbox_areas_p;
1349 	p_rx_mbox_t		*rx_mbox_p;
1350 	p_nxge_dma_pool_t	dma_buf_poolp;
1351 	p_nxge_dma_pool_t	dma_cntl_poolp;
1352 	p_rx_rbr_ring_t 	rbrp;
1353 	p_rx_rcr_ring_t 	rcrp;
1354 	p_rx_mbox_t 		mboxp;
1355 	p_nxge_dma_common_t 	dmap;
1356 	nxge_status_t		status = NXGE_OK;
1357 
1358 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel"));
1359 
1360 	(void) nxge_rxdma_stop_channel(nxgep, channel);
1361 
1362 	dma_buf_poolp = nxgep->rx_buf_pool_p;
1363 	dma_cntl_poolp = nxgep->rx_cntl_pool_p;
1364 
1365 	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
1366 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1367 		    "<== nxge_rxdma_fix_channel: buf not allocated"));
1368 		return;
1369 	}
1370 
1371 	ndmas = dma_buf_poolp->ndmas;
1372 	if (!ndmas) {
1373 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1374 		    "<== nxge_rxdma_fix_channel: no dma allocated"));
1375 		return;
1376 	}
1377 
1378 	rx_rbr_rings = nxgep->rx_rbr_rings;
1379 	rx_rcr_rings = nxgep->rx_rcr_rings;
1380 	rbr_rings = rx_rbr_rings->rbr_rings;
1381 	rcr_rings = rx_rcr_rings->rcr_rings;
1382 	rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
1383 	rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
1384 
1385 	/* Reinitialize the receive block and completion rings */
1386 	rbrp = (p_rx_rbr_ring_t)rbr_rings[channel];
1387 	rcrp = (p_rx_rcr_ring_t)rcr_rings[channel];
1388 	mboxp = (p_rx_mbox_t)rx_mbox_p[channel];
1389 
1390 	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
1391 	rbrp->rbr_rd_index = 0;
1392 	rcrp->comp_rd_index = 0;
1393 	rcrp->comp_wt_index = 0;
1394 
1395 	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
1396 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
1397 
1398 	status = nxge_rxdma_start_channel(nxgep, channel,
1399 	    rbrp, rcrp, mboxp);
1400 	if (status != NXGE_OK) {
1401 		goto nxge_rxdma_fix_channel_fail;
1402 	}
1403 
1404 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1405 	    "<== nxge_rxdma_fix_channel: success (0x%08x)", status));
1406 	return;
1407 
1408 nxge_rxdma_fix_channel_fail:
1409 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1410 	    "<== nxge_rxdma_fix_channel: failed (0x%08x)", status));
1411 }
1412 
1413 p_rx_rbr_ring_t
1414 nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel)
1415 {
1416 	nxge_grp_set_t *set = &nxgep->rx_set;
1417 	nxge_channel_t rdc;
1418 
1419 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1420 	    "==> nxge_rxdma_get_rbr_ring: channel %d", channel));
1421 
1422 	if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
1423 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
1424 		    "<== nxge_rxdma_get_rbr_ring: "
1425 		    "NULL ring pointer(s)"));
1426 		return (NULL);
1427 	}
1428 
1429 	if (set->owned.map == 0) {
1430 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
1431 		    "<== nxge_rxdma_get_rbr_ring: no channels"));
1432 		return (NULL);
1433 	}
1434 
1435 	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1436 		if ((1 << rdc) & set->owned.map) {
1437 			rx_rbr_ring_t *ring =
1438 			    nxgep->rx_rbr_rings->rbr_rings[rdc];
1439 			if (ring) {
1440 				if (channel == ring->rdc) {
1441 					NXGE_DEBUG_MSG((nxgep, RX_CTL,
1442 					    "==> nxge_rxdma_get_rbr_ring: "
1443 					    "channel %d ring $%p", rdc, ring));
1444 					return (ring);
1445 				}
1446 			}
1447 		}
1448 	}
1449 
1450 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1451 	    "<== nxge_rxdma_get_rbr_ring: not found"));
1452 
1453 	return (NULL);
1454 }
1455 
1456 p_rx_rcr_ring_t
1457 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel)
1458 {
1459 	nxge_grp_set_t *set = &nxgep->rx_set;
1460 	nxge_channel_t rdc;
1461 
1462 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1463 	    "==> nxge_rxdma_get_rcr_ring: channel %d", channel));
1464 
1465 	if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) {
1466 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
1467 		    "<== nxge_rxdma_get_rcr_ring: "
1468 		    "NULL ring pointer(s)"));
1469 		return (NULL);
1470 	}
1471 
1472 	if (set->owned.map == 0) {
1473 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
1474 		    "<== nxge_rxdma_get_rcr_ring: no channels"));
1475 		return (NULL);
1476 	}
1477 
1478 	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1479 		if ((1 << rdc) & set->owned.map) {
1480 			rx_rcr_ring_t *ring =
1481 			    nxgep->rx_rcr_rings->rcr_rings[rdc];
1482 			if (ring) {
1483 				if (channel == ring->rdc) {
1484 					NXGE_DEBUG_MSG((nxgep, RX_CTL,
1485 					    "==> nxge_rxdma_get_rcr_ring: "
1486 					    "channel %d ring $%p", rdc, ring));
1487 					return (ring);
1488 				}
1489 			}
1490 		}
1491 	}
1492 
1493 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1494 	    "<== nxge_rxdma_get_rcr_ring: not found"));
1495 
1496 	return (NULL);
1497 }
1498 
1499 /*
1500  * Static functions start here.
1501  */
1502 static p_rx_msg_t
1503 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p)
1504 {
1505 	p_rx_msg_t nxge_mp 		= NULL;
1506 	p_nxge_dma_common_t		dmamsg_p;
1507 	uchar_t 			*buffer;
1508 
1509 	nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP);
1510 	if (nxge_mp == NULL) {
1511 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
1512 		    "Allocation of a rx msg failed."));
1513 		goto nxge_allocb_exit;
1514 	}
1515 
1516 	nxge_mp->use_buf_pool = B_FALSE;
1517 	if (dmabuf_p) {
1518 		nxge_mp->use_buf_pool = B_TRUE;
1519 		dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma;
1520 		*dmamsg_p = *dmabuf_p;
1521 		dmamsg_p->nblocks = 1;
1522 		dmamsg_p->block_size = size;
1523 		dmamsg_p->alength = size;
1524 		buffer = (uchar_t *)dmabuf_p->kaddrp;
1525 
1526 		dmabuf_p->kaddrp = (void *)
1527 		    ((char *)dmabuf_p->kaddrp + size);
1528 		dmabuf_p->ioaddr_pp = (void *)
1529 		    ((char *)dmabuf_p->ioaddr_pp + size);
1530 		dmabuf_p->alength -= size;
1531 		dmabuf_p->offset += size;
1532 		dmabuf_p->dma_cookie.dmac_laddress += size;
1533 		dmabuf_p->dma_cookie.dmac_size -= size;
1534 
1535 	} else {
1536 		buffer = KMEM_ALLOC(size, KM_NOSLEEP);
1537 		if (buffer == NULL) {
1538 			NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
1539 			    "Allocation of a receive page failed."));
1540 			goto nxge_allocb_fail1;
1541 		}
1542 	}
1543 
1544 	nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb);
1545 	if (nxge_mp->rx_mblk_p == NULL) {
1546 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed."));
1547 		goto nxge_allocb_fail2;
1548 	}
1549 
1550 	nxge_mp->buffer = buffer;
1551 	nxge_mp->block_size = size;
1552 	nxge_mp->freeb.free_func = (void (*)())nxge_freeb;
1553 	nxge_mp->freeb.free_arg = (caddr_t)nxge_mp;
1554 	nxge_mp->ref_cnt = 1;
1555 	nxge_mp->free = B_TRUE;
1556 	nxge_mp->rx_use_bcopy = B_FALSE;
1557 
1558 	atomic_inc_32(&nxge_mblks_pending);
1559 
1560 	goto nxge_allocb_exit;
1561 
1562 nxge_allocb_fail2:
1563 	if (!nxge_mp->use_buf_pool) {
1564 		KMEM_FREE(buffer, size);
1565 	}
1566 
1567 nxge_allocb_fail1:
1568 	KMEM_FREE(nxge_mp, sizeof (rx_msg_t));
1569 	nxge_mp = NULL;
1570 
1571 nxge_allocb_exit:
1572 	return (nxge_mp);
1573 }
1574 
1575 p_mblk_t
1576 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
1577 {
1578 	p_mblk_t mp;
1579 
1580 	NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb"));
1581 	NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p "
1582 	    "offset = 0x%08X "
1583 	    "size = 0x%08X",
1584 	    nxge_mp, offset, size));
1585 
1586 	mp = desballoc(&nxge_mp->buffer[offset], size,
1587 	    0, &nxge_mp->freeb);
1588 	if (mp == NULL) {
1589 		NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
1590 		goto nxge_dupb_exit;
1591 	}
1592 	atomic_inc_32(&nxge_mp->ref_cnt);
1593 
1594 
1595 nxge_dupb_exit:
1596 	NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p",
1597 	    nxge_mp));
1598 	return (mp);
1599 }
1600 
1601 p_mblk_t
1602 nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
1603 {
1604 	p_mblk_t mp;
1605 	uchar_t *dp;
1606 
1607 	mp = allocb(size + NXGE_RXBUF_EXTRA, 0);
1608 	if (mp == NULL) {
1609 		NXGE_DEBUG_MSG((NULL, RX_CTL, "allocb failed"));
1610 		goto nxge_dupb_bcopy_exit;
1611 	}
1612 	dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA;
1613 	bcopy((void *)&nxge_mp->buffer[offset], dp, size);
1614 	mp->b_wptr = dp + size;
1615 
1616 nxge_dupb_bcopy_exit:
1617 	NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb_bcopy mp = $%p",
1618 	    nxge_mp));
1619 	return (mp);
1620 }
1621 
1622 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p,
1623 	p_rx_msg_t rx_msg_p);
1624 
1625 void
1626 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p)
1627 {
1628 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page"));
1629 
1630 	/* Reuse this buffer */
1631 	rx_msg_p->free = B_FALSE;
1632 	rx_msg_p->cur_usage_cnt = 0;
1633 	rx_msg_p->max_usage_cnt = 0;
1634 	rx_msg_p->pkt_buf_size = 0;
1635 
1636 	if (rx_rbr_p->rbr_use_bcopy) {
1637 		rx_msg_p->rx_use_bcopy = B_FALSE;
1638 		atomic_dec_32(&rx_rbr_p->rbr_consumed);
1639 	}
1640 
1641 	/*
1642 	 * Get the rbr header pointer and its offset index.
1643 	 */
1644 	MUTEX_ENTER(&rx_rbr_p->post_lock);
1645 	rx_rbr_p->rbr_wr_index =  ((rx_rbr_p->rbr_wr_index + 1) &
1646 	    rx_rbr_p->rbr_wrap_mask);
1647 	rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr;
1648 	MUTEX_EXIT(&rx_rbr_p->post_lock);
1649 	npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep),
1650 	    rx_rbr_p->rdc, 1);
1651 
1652 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1653 	    "<== nxge_post_page (channel %d post_next_index %d)",
1654 	    rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index));
1655 
1656 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page"));
1657 }
1658 
1659 void
1660 nxge_freeb(p_rx_msg_t rx_msg_p)
1661 {
1662 	size_t size;
1663 	uchar_t *buffer = NULL;
1664 	int ref_cnt;
1665 	boolean_t free_state = B_FALSE;
1666 
1667 	rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p;
1668 
1669 	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb"));
1670 	NXGE_DEBUG_MSG((NULL, MEM2_CTL,
1671 	    "nxge_freeb:rx_msg_p = $%p (block pending %d)",
1672 	    rx_msg_p, nxge_mblks_pending));
1673 
1674 	/*
1675 	 * First we need to get the free state, then
1676 	 * atomically decrement the reference count to prevent
1677 	 * a race condition with the interrupt thread that
1678 	 * is processing a loaned-up buffer block.
1679 	 */
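	/*
	 * Sketch of the two outcomes below: when the count drops to zero,
	 * the buffer (and possibly the whole ring) is freed; when it drops
	 * to one and the block was marked free, the buffer is reposted to
	 * the hardware via nxge_post_page(), provided the ring is still
	 * in the RBR_POSTING state.
	 */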
1680 	free_state = rx_msg_p->free;
1681 	ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1);
1682 	if (!ref_cnt) {
1683 		atomic_dec_32(&nxge_mblks_pending);
1684 		buffer = rx_msg_p->buffer;
1685 		size = rx_msg_p->block_size;
1686 		NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: "
1687 		    "will free: rx_msg_p = $%p (block pending %d)",
1688 		    rx_msg_p, nxge_mblks_pending));
1689 
1690 		if (!rx_msg_p->use_buf_pool) {
1691 			KMEM_FREE(buffer, size);
1692 		}
1693 
1694 		KMEM_FREE(rx_msg_p, sizeof (rx_msg_t));
1695 
1696 		if (ring) {
1697 			/*
1698 			 * Decrement the receive buffer ring's reference
1699 			 * count, too.
1700 			 */
1701 			atomic_dec_32(&ring->rbr_ref_cnt);
1702 
1703 			/*
1704 			 * Free the receive buffer ring, if
1705 			 * 1. all the receive buffers have been freed
1706 			 * 2. and we are in the proper state (that is,
1707 			 *    we are not UNMAPPING).
1708 			 */
1709 			if (ring->rbr_ref_cnt == 0 &&
1710 			    ring->rbr_state == RBR_UNMAPPED) {
1711 				/*
1712 				 * Free receive data buffers,
1713 				 * buffer index information
1714 				 * (rxring_info) and
1715 				 * the message block ring.
1716 				 */
1717 				NXGE_DEBUG_MSG((NULL, RX_CTL,
1718 				    "nxge_freeb:rx_msg_p = $%p "
1719 				    "(block pending %d) free buffers",
1720 				    rx_msg_p, nxge_mblks_pending));
1721 				nxge_rxdma_databuf_free(ring);
1722 				if (ring->ring_info) {
1723 					KMEM_FREE(ring->ring_info,
1724 					    sizeof (rxring_info_t));
1725 				}
1726 
1727 				if (ring->rx_msg_ring) {
1728 					KMEM_FREE(ring->rx_msg_ring,
1729 					    ring->tnblocks *
1730 					    sizeof (p_rx_msg_t));
1731 				}
1732 				KMEM_FREE(ring, sizeof (*ring));
1733 			}
1734 		}
1735 		return;
1736 	}
1737 
1738 	/*
1739 	 * Repost buffer.
1740 	 */
1741 	if (free_state && (ref_cnt == 1) && ring) {
1742 		NXGE_DEBUG_MSG((NULL, RX_CTL,
1743 		    "nxge_freeb: post page $%p:", rx_msg_p));
1744 		if (ring->rbr_state == RBR_POSTING)
1745 			nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p);
1746 	}
1747 
1748 	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb"));
1749 }
1750 
1751 uint_t
1752 nxge_rx_intr(void *arg1, void *arg2)
1753 {
1754 	p_nxge_ldv_t		ldvp = (p_nxge_ldv_t)arg1;
1755 	p_nxge_t		nxgep = (p_nxge_t)arg2;
1756 	p_nxge_ldg_t		ldgp;
1757 	uint8_t			channel;
1758 	npi_handle_t		handle;
1759 	rx_dma_ctl_stat_t	cs;
1760 	p_rx_rcr_ring_t		rcrp;
1761 	mblk_t			*mp = NULL;
1762 
1763 	if (ldvp == NULL) {
1764 		NXGE_DEBUG_MSG((NULL, INT_CTL,
1765 		    "<== nxge_rx_intr: arg2 $%p arg1 $%p",
1766 		    nxgep, ldvp));
1767 		return (DDI_INTR_CLAIMED);
1768 	}
1769 
1770 	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
1771 		nxgep = ldvp->nxgep;
1772 	}
1773 
1774 	if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
1775 	    (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
1776 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
1777 		    "<== nxge_rx_intr: interface not started or initialized"));
1778 		return (DDI_INTR_CLAIMED);
1779 	}
1780 
1781 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1782 	    "==> nxge_rx_intr: arg2 $%p arg1 $%p",
1783 	    nxgep, ldvp));
1784 
1785 	/*
1786 	 * Get the PIO handle.
1787 	 */
1788 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1789 
1790 	/*
1791 	 * Get the ring to enable us to process packets.
1792 	 */
1793 	rcrp = nxgep->rx_rcr_rings->rcr_rings[ldvp->vdma_index];
1794 
1795 	/*
1796 	 * The RCR ring lock must be held when packets
1797 	 * are being processed and the hardware registers are
1798 	 * being read or written to prevent race conditions
1799 	 * between the interrupt thread and the polling thread
1800 	 * (which would cause fatal errors such as the rcrincon
1801 	 * bit being set) and on the setting of poll_flag.
1802 	 */
1803 	MUTEX_ENTER(&rcrp->lock);
1804 
1805 	/*
1806 	 * Get the control and status for this channel.
1807 	 */
1808 	channel = ldvp->channel;
1809 	ldgp = ldvp->ldgp;
1810 
1811 	if (!isLDOMguest(nxgep) && (!nxgep->rx_channel_started[channel])) {
1812 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
1813 		    "<== nxge_rx_intr: channel is not started"));
1814 
1815 		/*
1816 		 * We received an interrupt before the ring is started.
1817 		 */
1818 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
1819 		    &cs.value);
1820 		cs.value &= RX_DMA_CTL_STAT_WR1C;
1821 		cs.bits.hdw.mex = 1;
1822 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
1823 		    cs.value);
1824 
1825 		/*
1826 		 * Rearm this logical group if this is a single device
1827 		 * group.
1828 		 */
1829 		if (ldgp->nldvs == 1) {
1830 			if (isLDOMguest(nxgep)) {
1831 				nxge_hio_ldgimgn(nxgep, ldgp);
1832 			} else {
1833 				ldgimgm_t mgm;
1834 
1835 				mgm.value = 0;
1836 				mgm.bits.ldw.arm = 1;
1837 				mgm.bits.ldw.timer = ldgp->ldg_timer;
1838 
1839 				NXGE_REG_WR64(handle,
1840 				    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
1841 				    mgm.value);
1842 			}
1843 		}
1844 		MUTEX_EXIT(&rcrp->lock);
1845 		return (DDI_INTR_CLAIMED);
1846 	}
1847 
1848 	ASSERT(rcrp->ldgp == ldgp);
1849 	ASSERT(rcrp->ldvp == ldvp);
1850 
1851 	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value);
1852 
1853 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d "
1854 	    "cs 0x%016llx rcrto 0x%x rcrthres %x",
1855 	    channel,
1856 	    cs.value,
1857 	    cs.bits.hdw.rcrto,
1858 	    cs.bits.hdw.rcrthres));
1859 
1860 	if (!rcrp->poll_flag) {
1861 		mp = nxge_rx_pkts(nxgep, rcrp, cs, -1);
1862 	}
1863 
1864 	/* error events. */
1865 	if (cs.value & RX_DMA_CTL_STAT_ERROR) {
1866 		(void) nxge_rx_err_evnts(nxgep, channel, cs);
1867 	}
1868 
1869 	/*
1870 	 * Enable the mailbox update interrupt if we want
1871 	 * to use mailbox. We probably don't need to use
1872 	 * mailbox as it only saves us one pio read.
1873 	 * Also write 1 to rcrthres and rcrto to clear
1874 	 * these two edge triggered bits.
1875 	 */
1876 	cs.value &= RX_DMA_CTL_STAT_WR1C;
1877 	cs.bits.hdw.mex = rcrp->poll_flag ? 0 : 1;
1878 	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
1879 	    cs.value);
1880 
1881 	/*
1882 	 * If the polling mode is enabled, disable the interrupt.
1883 	 */
1884 	if (rcrp->poll_flag) {
1885 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
1886 		    "==> nxge_rx_intr: rdc %d ldgp $%p ldvp $%p "
1887 		    "(disabling interrupts)", channel, ldgp, ldvp));
1888 
1889 		/*
1890 		 * Disarm this logical group if this is a single device
1891 		 * group.
1892 		 */
1893 		if (ldgp->nldvs == 1) {
1894 			if (isLDOMguest(nxgep)) {
1895 				ldgp->arm = B_FALSE;
1896 				nxge_hio_ldgimgn(nxgep, ldgp);
1897 			} else {
1898 				ldgimgm_t mgm;
1899 				mgm.value = 0;
1900 				mgm.bits.ldw.arm = 0;
1901 				NXGE_REG_WR64(handle,
1902 				    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
1903 				    mgm.value);
1904 			}
1905 		}
1906 	} else {
1907 		/*
1908 		 * Rearm this logical group if this is a single device
1909 		 * group.
1910 		 */
1911 		if (ldgp->nldvs == 1) {
1912 			if (isLDOMguest(nxgep)) {
1913 				nxge_hio_ldgimgn(nxgep, ldgp);
1914 			} else {
1915 				ldgimgm_t mgm;
1916 
1917 				mgm.value = 0;
1918 				mgm.bits.ldw.arm = 1;
1919 				mgm.bits.ldw.timer = ldgp->ldg_timer;
1920 
1921 				NXGE_REG_WR64(handle,
1922 				    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
1923 				    mgm.value);
1924 			}
1925 		}
1926 
1927 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
1928 		    "==> nxge_rx_intr: rdc %d ldgp $%p "
1929 		    "exiting ISR (and call mac_rx_ring)", channel, ldgp));
1930 	}
1931 	MUTEX_EXIT(&rcrp->lock);
1932 
1933 	if (mp != NULL) {
1934 		mac_rx_ring(nxgep->mach, rcrp->rcr_mac_handle, mp,
1935 		    rcrp->rcr_gen_num);
1936 	}
1937 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: DDI_INTR_CLAIMED"));
1938 	return (DDI_INTR_CLAIMED);
1939 }
1940 
1941 /*
1942  * This routine is the main packet receive processing function.
1943  * It gets the packet type, error code, and buffer related
1944  * information from the receive completion entry.
1945  * How many completion entries to process is based on the number of packets
1946  * queued by the hardware, a hardware maintained tail pointer
1947  * and a configurable receive packet count.
1948  *
1949  * A chain of message blocks will be created as a result of processing
1950  * the completion entries. This chain of message blocks will be returned and
1951  * a hardware control status register will be updated with the number of
1952  * packets that were removed from the hardware queue.
1953  *
1954  * The RCR ring lock is held when entering this function.
1955  */
1956 static mblk_t *
1957 nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs,
1958     int bytes_to_pickup)
1959 {
1960 	npi_handle_t		handle;
1961 	uint8_t			channel;
1962 	uint32_t		comp_rd_index;
1963 	p_rcr_entry_t		rcr_desc_rd_head_p;
1964 	p_rcr_entry_t		rcr_desc_rd_head_pp;
1965 	p_mblk_t		nmp, mp_cont, head_mp, *tail_mp;
1966 	uint16_t		qlen, nrcr_read, npkt_read;
1967 	uint32_t		qlen_hw;
1968 	boolean_t		multi;
1969 	rcrcfig_b_t		rcr_cfg_b;
1970 	int			totallen = 0;
1971 #if defined(_BIG_ENDIAN)
1972 	npi_status_t		rs = NPI_SUCCESS;
1973 #endif
1974 
1975 	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_rx_pkts: "
1976 	    "channel %d", rcr_p->rdc));
1977 
1978 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1979 		return (NULL);
1980 	}
1981 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1982 	channel = rcr_p->rdc;
1983 
1984 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1985 	    "==> nxge_rx_pkts: START: rcr channel %d "
1986 	    "head_p $%p head_pp $%p  index %d ",
1987 	    channel, rcr_p->rcr_desc_rd_head_p,
1988 	    rcr_p->rcr_desc_rd_head_pp,
1989 	    rcr_p->comp_rd_index));
1990 
1991 
1992 #if !defined(_BIG_ENDIAN)
1993 	qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff;
1994 #else
1995 	rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
1996 	if (rs != NPI_SUCCESS) {
1997 		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: "
1998 		    "channel %d, get qlen failed 0x%08x",
1999 		    channel, rs));
2000 		return (NULL);
2001 	}
2002 #endif
2003 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d "
2004 	    "qlen %d", channel, qlen));
2005 
2006 
2007 
2008 	if (!qlen) {
2009 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2010 		    "==> nxge_rx_pkts:rcr channel %d "
2011 		    "qlen %d (no pkts)", channel, qlen));
2012 
2013 		return (NULL);
2014 	}
2015 
2016 	comp_rd_index = rcr_p->comp_rd_index;
2017 
2018 	rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p;
2019 	rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp;
2020 	nrcr_read = npkt_read = 0;
2021 
2022 	/*
2023 	 * Number of packets queued.
2024 	 * (A jumbo or multi-buffer packet is counted as only one
2025 	 *  packet, even though it may use more than one completion entry.)
2026 	 */
2027 	qlen_hw = (qlen < nxge_max_rx_pkts) ?
2028 	    qlen : nxge_max_rx_pkts;
2029 	head_mp = NULL;
2030 	tail_mp = &head_mp;
2031 	nmp = mp_cont = NULL;
2032 	multi = B_FALSE;
2033 
2034 	while (qlen_hw) {
2035 
2036 #ifdef NXGE_DEBUG
2037 		nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p);
2038 #endif
2039 		/*
2040 		 * Process one completion ring entry.
2041 		 */
2042 		nxge_receive_packet(nxgep,
2043 		    rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont);
2044 
2045 		/*
2046 		 * message chaining modes
2047 		 */
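		/*
		 * Complete frames are linked with b_next, while buffers
		 * belonging to the same multi-buffer frame are linked
		 * with b_cont (the four cases below).
		 */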
2048 		if (nmp) {
2049 			nmp->b_next = NULL;
2050 			if (!multi && !mp_cont) { /* frame fits a partition */
2051 				*tail_mp = nmp;
2052 				tail_mp = &nmp->b_next;
2053 				totallen += MBLKL(nmp);
2054 				nmp = NULL;
2055 			} else if (multi && !mp_cont) { /* first segment */
2056 				*tail_mp = nmp;
2057 				tail_mp = &nmp->b_cont;
2058 				totallen += MBLKL(nmp);
2059 			} else if (multi && mp_cont) {	/* mid of multi segs */
2060 				*tail_mp = mp_cont;
2061 				tail_mp = &mp_cont->b_cont;
2062 				totallen += MBLKL(mp_cont);
2063 			} else if (!multi && mp_cont) { /* last segment */
2064 				*tail_mp = mp_cont;
2065 				tail_mp = &nmp->b_next;
2066 				totallen += MBLKL(mp_cont);
2067 				nmp = NULL;
2068 			}
2069 		}
2070 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
2071 		    "==> nxge_rx_pkts: loop: rcr channel %d "
2072 		    "before updating: multi %d "
2073 		    "nrcr_read %d "
2074 		    "npk read %d "
2075 		    "head_pp $%p  index %d ",
2076 		    channel,
2077 		    multi,
2078 		    nrcr_read, npkt_read, rcr_desc_rd_head_pp,
2079 		    comp_rd_index));
2080 
2081 		if (!multi) {
2082 			qlen_hw--;
2083 			npkt_read++;
2084 		}
2085 
2086 		/*
2087 		 * Update the next read entry.
2088 		 */
2089 		comp_rd_index = NEXT_ENTRY(comp_rd_index,
2090 		    rcr_p->comp_wrap_mask);
2091 
2092 		rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
2093 		    rcr_p->rcr_desc_first_p,
2094 		    rcr_p->rcr_desc_last_p);
2095 
2096 		nrcr_read++;
2097 
2098 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
2099 		    "<== nxge_rx_pkts: (SAM, process one packet) "
2100 		    "nrcr_read %d",
2101 		    nrcr_read));
2102 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
2103 		    "==> nxge_rx_pkts: loop: rcr channel %d "
2104 		    "multi %d "
2105 		    "nrcr_read %d "
2106 		    "npk read %d "
2107 		    "head_pp $%p  index %d ",
2108 		    channel,
2109 		    multi,
2110 		    nrcr_read, npkt_read, rcr_desc_rd_head_pp,
2111 		    comp_rd_index));
2112 
2113 		if ((bytes_to_pickup != -1) &&
2114 		    (totallen >= bytes_to_pickup)) {
2115 			break;
2116 		}
2117 	}
2118 
2119 	rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp;
2120 	rcr_p->comp_rd_index = comp_rd_index;
2121 	rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p;
2122 	if ((nxgep->intr_timeout != rcr_p->intr_timeout) ||
2123 	    (nxgep->intr_threshold != rcr_p->intr_threshold)) {
2124 
2125 		rcr_p->intr_timeout = (nxgep->intr_timeout <
2126 		    NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN :
2127 		    nxgep->intr_timeout;
2128 
2129 		rcr_p->intr_threshold = (nxgep->intr_threshold <
2130 		    NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN :
2131 		    nxgep->intr_threshold;
2132 
2133 		rcr_cfg_b.value = 0x0ULL;
2134 		rcr_cfg_b.bits.ldw.entout = 1;
2135 		rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout;
2136 		rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold;
2137 
2138 		RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG,
2139 		    channel, rcr_cfg_b.value);
2140 	}
2141 
2142 	cs.bits.ldw.pktread = npkt_read;
2143 	cs.bits.ldw.ptrread = nrcr_read;
2144 	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
2145 	    channel, cs.value);
2146 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
2147 	    "==> nxge_rx_pkts: EXIT: rcr channel %d "
2148 	    "head_pp $%p  index %016llx ",
2149 	    channel,
2150 	    rcr_p->rcr_desc_rd_head_pp,
2151 	    rcr_p->comp_rd_index));
2152 	/*
2153 	 * Update RCR buffer pointer read and number of packets
2154 	 * read.
2155 	 */
2156 
2157 	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_rx_pkts: return "
2158 	    "channel %d", rcr_p->rdc));
2159 
2160 	return (head_mp);
2161 }
2162 
2163 void
2164 nxge_receive_packet(p_nxge_t nxgep,
2165     p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p,
2166     boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont)
2167 {
2168 	p_mblk_t		nmp = NULL;
2169 	uint64_t		multi;
2170 	uint64_t		dcf_err;
2171 	uint8_t			channel;
2172 
2173 	boolean_t		first_entry = B_TRUE;
2174 	boolean_t		is_tcp_udp = B_FALSE;
2175 	boolean_t		buffer_free = B_FALSE;
2176 	boolean_t		error_send_up = B_FALSE;
2177 	uint8_t			error_type;
2178 	uint16_t		l2_len;
2179 	uint16_t		skip_len;
2180 	uint8_t			pktbufsz_type;
2181 	uint64_t		rcr_entry;
2182 	uint64_t		*pkt_buf_addr_pp;
2183 	uint64_t		*pkt_buf_addr_p;
2184 	uint32_t		buf_offset;
2185 	uint32_t		bsize;
2186 	uint32_t		error_disp_cnt;
2187 	uint32_t		msg_index;
2188 	p_rx_rbr_ring_t		rx_rbr_p;
2189 	p_rx_msg_t 		*rx_msg_ring_p;
2190 	p_rx_msg_t		rx_msg_p;
2191 	uint16_t		sw_offset_bytes = 0, hdr_size = 0;
2192 	nxge_status_t		status = NXGE_OK;
2193 	boolean_t		is_valid = B_FALSE;
2194 	p_nxge_rx_ring_stats_t	rdc_stats;
2195 	uint32_t		bytes_read;
2196 	uint64_t		pkt_type;
2197 	uint64_t		frag;
2198 	boolean_t		pkt_too_long_err = B_FALSE;
2199 #ifdef	NXGE_DEBUG
2200 	int			dump_len;
2201 #endif
2202 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet"));
2203 	first_entry = (*mp == NULL) ? B_TRUE : B_FALSE;
2204 
2205 	rcr_entry = *((uint64_t *)rcr_desc_rd_head_p);
2206 
2207 	multi = (rcr_entry & RCR_MULTI_MASK);
2208 	dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK);
2209 	pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK);
2210 
2211 	error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT);
2212 	frag = (rcr_entry & RCR_FRAG_MASK);
2213 
2214 	l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT);
2215 
2216 	pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >>
2217 	    RCR_PKTBUFSZ_SHIFT);
2218 #if defined(__i386)
2219 	pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry &
2220 	    RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT);
2221 #else
2222 	pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) <<
2223 	    RCR_PKT_BUF_ADDR_SHIFT);
2224 #endif
2225 
2226 	channel = rcr_p->rdc;
2227 
2228 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2229 	    "==> nxge_receive_packet: entryp $%p entry 0x%0llx "
2230 	    "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
2231 	    "error_type 0x%x pkt_type 0x%x  "
2232 	    "pktbufsz_type %d ",
2233 	    rcr_desc_rd_head_p,
2234 	    rcr_entry, pkt_buf_addr_pp, l2_len,
2235 	    multi,
2236 	    error_type,
2237 	    pkt_type,
2238 	    pktbufsz_type));
2239 
2240 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2241 	    "==> nxge_receive_packet: entryp $%p entry 0x%0llx "
2242 	    "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
2243 	    "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p,
2244 	    rcr_entry, pkt_buf_addr_pp, l2_len,
2245 	    multi,
2246 	    error_type,
2247 	    pkt_type));
2248 
2249 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2250 	    "==> (rbr) nxge_receive_packet: entry 0x%0llx "
2251 	    "full pkt_buf_addr_pp $%p l2_len %d",
2252 	    rcr_entry, pkt_buf_addr_pp, l2_len));
2253 
2254 	/* get the stats ptr */
2255 	rdc_stats = rcr_p->rdc_stats;
2256 
2257 	if (!l2_len) {
2258 
2259 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
2260 		    "<== nxge_receive_packet: failed: l2 length is 0."));
2261 		return;
2262 	}
2263 
2264 	/*
2265 	 * Software workaround for a BMAC hardware limitation that allows
2266 	 * a maxframe size of 1526 instead of 1522 for non-jumbo frames,
2267 	 * and 0x2406 instead of 0x2400 for jumbo frames.
2268 	 */
2269 	if (l2_len > nxgep->mac.maxframesize) {
2270 		pkt_too_long_err = B_TRUE;
2271 	}
2272 
2273 	/* Hardware sends us 4 bytes of CRC as no stripping is done.  */
2274 	l2_len -= ETHERFCSL;
2275 
2276 	/* shift 6 bits to get the full io address */
2277 #if defined(__i386)
2278 	pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp <<
2279 	    RCR_PKT_BUF_ADDR_SHIFT_FULL);
2280 #else
2281 	pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp <<
2282 	    RCR_PKT_BUF_ADDR_SHIFT_FULL);
2283 #endif
2284 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2285 	    "==> (rbr) nxge_receive_packet: entry 0x%0llx "
2286 	    "full pkt_buf_addr_pp $%p l2_len %d",
2287 	    rcr_entry, pkt_buf_addr_pp, l2_len));
2288 
2289 	rx_rbr_p = rcr_p->rx_rbr_p;
2290 	rx_msg_ring_p = rx_rbr_p->rx_msg_ring;
2291 
2292 	if (first_entry) {
2293 		hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL :
2294 		    RXDMA_HDR_SIZE_DEFAULT);
2295 
2296 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
2297 		    "==> nxge_receive_packet: first entry 0x%016llx "
2298 		    "pkt_buf_addr_pp $%p l2_len %d hdr %d",
2299 		    rcr_entry, pkt_buf_addr_pp, l2_len,
2300 		    hdr_size));
2301 	}
2302 
2303 	MUTEX_ENTER(&rx_rbr_p->lock);
2304 
2305 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
2306 	    "==> (rbr 1) nxge_receive_packet: entry 0x%0llx "
2307 	    "full pkt_buf_addr_pp $%p l2_len %d",
2308 	    rcr_entry, pkt_buf_addr_pp, l2_len));
2309 
2310 	/*
2311 	 * Packet buffer address in the completion entry points
2312 	 * to the starting buffer address (offset 0).
2313 	 * Use the starting buffer address to locate the corresponding
2314 	 * kernel address.
2315 	 */
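	/*
	 * nxge_rxbuf_pp_to_vp() returns the kernel virtual address of
	 * the buffer, the offset into the buffer block, and the index
	 * of the corresponding rx_msg_t in the message ring.
	 */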
2316 	status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p,
2317 	    pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p,
2318 	    &buf_offset,
2319 	    &msg_index);
2320 
2321 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
2322 	    "==> (rbr 2) nxge_receive_packet: entry 0x%0llx "
2323 	    "full pkt_buf_addr_pp $%p l2_len %d",
2324 	    rcr_entry, pkt_buf_addr_pp, l2_len));
2325 
2326 	if (status != NXGE_OK) {
2327 		MUTEX_EXIT(&rx_rbr_p->lock);
2328 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
2329 		    "<== nxge_receive_packet: found vaddr failed %d",
2330 		    status));
2331 		return;
2332 	}
2333 
2334 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2335 	    "==> (rbr 3) nxge_receive_packet: entry 0x%0llx "
2336 	    "full pkt_buf_addr_pp $%p l2_len %d",
2337 	    rcr_entry, pkt_buf_addr_pp, l2_len));
2338 
2339 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2340 	    "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
2341 	    "full pkt_buf_addr_pp $%p l2_len %d",
2342 	    msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
2343 
2344 	rx_msg_p = rx_msg_ring_p[msg_index];
2345 
2346 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2347 	    "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
2348 	    "full pkt_buf_addr_pp $%p l2_len %d",
2349 	    msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
2350 
2351 	switch (pktbufsz_type) {
2352 	case RCR_PKTBUFSZ_0:
2353 		bsize = rx_rbr_p->pkt_buf_size0_bytes;
2354 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2355 		    "==> nxge_receive_packet: 0 buf %d", bsize));
2356 		break;
2357 	case RCR_PKTBUFSZ_1:
2358 		bsize = rx_rbr_p->pkt_buf_size1_bytes;
2359 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2360 		    "==> nxge_receive_packet: 1 buf %d", bsize));
2361 		break;
2362 	case RCR_PKTBUFSZ_2:
2363 		bsize = rx_rbr_p->pkt_buf_size2_bytes;
2364 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
2365 		    "==> nxge_receive_packet: 2 buf %d", bsize));
2366 		break;
2367 	case RCR_SINGLE_BLOCK:
2368 		bsize = rx_msg_p->block_size;
2369 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2370 		    "==> nxge_receive_packet: single %d", bsize));
2371 
2372 		break;
2373 	default:
2374 		MUTEX_EXIT(&rx_rbr_p->lock);
2375 		return;
2376 	}
2377 
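	/*
	 * Sync only the portion of the buffer that was DMA'd in
	 * (the SW offset plus header plus L2 payload) for CPU access.
	 */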
2378 	DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma,
2379 	    (buf_offset + sw_offset_bytes),
2380 	    (hdr_size + l2_len),
2381 	    DDI_DMA_SYNC_FORCPU);
2382 
2383 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2384 	    "==> nxge_receive_packet: after first dump:usage count"));
2385 
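	/*
	 * A buffer block may be shared by several packets; max_usage_cnt
	 * is the number of packets that use the block before it can be
	 * reposted.  rx_use_bcopy is chosen from the consumed-block
	 * thresholds (rbr_threshold_lo/hi) set up at map time.
	 */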
2386 	if (rx_msg_p->cur_usage_cnt == 0) {
2387 		if (rx_rbr_p->rbr_use_bcopy) {
2388 			atomic_inc_32(&rx_rbr_p->rbr_consumed);
2389 			if (rx_rbr_p->rbr_consumed <
2390 			    rx_rbr_p->rbr_threshold_hi) {
2391 				if (rx_rbr_p->rbr_threshold_lo == 0 ||
2392 				    ((rx_rbr_p->rbr_consumed >=
2393 				    rx_rbr_p->rbr_threshold_lo) &&
2394 				    (rx_rbr_p->rbr_bufsize_type >=
2395 				    pktbufsz_type))) {
2396 					rx_msg_p->rx_use_bcopy = B_TRUE;
2397 				}
2398 			} else {
2399 				rx_msg_p->rx_use_bcopy = B_TRUE;
2400 			}
2401 		}
2402 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2403 		    "==> nxge_receive_packet: buf %d (new block) ",
2404 		    bsize));
2405 
2406 		rx_msg_p->pkt_buf_size_code = pktbufsz_type;
2407 		rx_msg_p->pkt_buf_size = bsize;
2408 		rx_msg_p->cur_usage_cnt = 1;
2409 		if (pktbufsz_type == RCR_SINGLE_BLOCK) {
2410 			NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2411 			    "==> nxge_receive_packet: buf %d "
2412 			    "(single block) ",
2413 			    bsize));
2414 			/*
2415 			 * Buffer can be reused once the free function
2416 			 * is called.
2417 			 */
2418 			rx_msg_p->max_usage_cnt = 1;
2419 			buffer_free = B_TRUE;
2420 		} else {
2421 			rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize;
2422 			if (rx_msg_p->max_usage_cnt == 1) {
2423 				buffer_free = B_TRUE;
2424 			}
2425 		}
2426 	} else {
2427 		rx_msg_p->cur_usage_cnt++;
2428 		if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) {
2429 			buffer_free = B_TRUE;
2430 		}
2431 	}
2432 
2433 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
2434 	    "msgbuf index = %d l2len %d bytes usage %d max_usage %d ",
2435 	    msg_index, l2_len,
2436 	    rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt));
2437 
2438 	if ((error_type) || (dcf_err) || (pkt_too_long_err)) {
2439 		rdc_stats->ierrors++;
2440 		if (dcf_err) {
2441 			rdc_stats->dcf_err++;
2442 #ifdef	NXGE_DEBUG
2443 			if (!rdc_stats->dcf_err) {
2444 				NXGE_DEBUG_MSG((nxgep, RX_CTL,
2445 				"nxge_receive_packet: channel %d dcf_err rcr"
2446 				" 0x%llx", channel, rcr_entry));
2447 			}
2448 #endif
2449 			NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
2450 			    NXGE_FM_EREPORT_RDMC_DCF_ERR);
2451 		} else if (pkt_too_long_err) {
2452 			rdc_stats->pkt_too_long_err++;
2453 			NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:"
2454 			    " channel %d packet length [%d] > "
2455 			    "maxframesize [%d]", channel, l2_len + ETHERFCSL,
2456 			    nxgep->mac.maxframesize));
2457 		} else {
2458 			/* Update error stats */
2459 			error_disp_cnt = NXGE_ERROR_SHOW_MAX;
2460 			rdc_stats->errlog.compl_err_type = error_type;
2461 
2462 			switch (error_type) {
2463 			/*
2464 			 * Do not send FMA ereport for RCR_L2_ERROR and
2465 			 * RCR_L4_CSUM_ERROR because most likely they indicate
2466 			 * back pressure rather than HW failures.
2467 			 */
2468 			case RCR_L2_ERROR:
2469 				rdc_stats->l2_err++;
2470 				if (rdc_stats->l2_err <
2471 				    error_disp_cnt) {
2472 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2473 					    " nxge_receive_packet:"
2474 					    " channel %d RCR L2_ERROR",
2475 					    channel));
2476 				}
2477 				break;
2478 			case RCR_L4_CSUM_ERROR:
2479 				error_send_up = B_TRUE;
2480 				rdc_stats->l4_cksum_err++;
2481 				if (rdc_stats->l4_cksum_err <
2482 				    error_disp_cnt) {
2483 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2484 					    " nxge_receive_packet:"
2485 					    " channel %d"
2486 					    " RCR L4_CSUM_ERROR", channel));
2487 				}
2488 				break;
2489 			/*
2490 			 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and
2491 			 * RCR_ZCP_SOFT_ERROR because they reflect the same
2492 			 * FFLP and ZCP errors that have been reported by
2493 			 * nxge_fflp.c and nxge_zcp.c.
2494 			 */
2495 			case RCR_FFLP_SOFT_ERROR:
2496 				error_send_up = B_TRUE;
2497 				rdc_stats->fflp_soft_err++;
2498 				if (rdc_stats->fflp_soft_err <
2499 				    error_disp_cnt) {
2500 					NXGE_ERROR_MSG((nxgep,
2501 					    NXGE_ERR_CTL,
2502 					    " nxge_receive_packet:"
2503 					    " channel %d"
2504 					    " RCR FFLP_SOFT_ERROR", channel));
2505 				}
2506 				break;
2507 			case RCR_ZCP_SOFT_ERROR:
2508 				error_send_up = B_TRUE;
2509 				rdc_stats->zcp_soft_err++;
2510 				if (rdc_stats->zcp_soft_err <
2511 				    error_disp_cnt)
2512 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2513 					    " nxge_receive_packet: Channel %d"
2514 					    " RCR ZCP_SOFT_ERROR", channel));
2515 				break;
2516 			default:
2517 				rdc_stats->rcr_unknown_err++;
2518 				if (rdc_stats->rcr_unknown_err
2519 				    < error_disp_cnt) {
2520 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2521 					    " nxge_receive_packet: Channel %d"
2522 					    " RCR entry 0x%llx error 0x%x",
2523 					    channel, rcr_entry, error_type));
2524 				}
2525 				break;
2526 			}
2527 		}
2528 
2529 		/*
2530 		 * Update and repost buffer block if max usage
2531 		 * count is reached.
2532 		 */
2533 		if (error_send_up == B_FALSE) {
2534 			atomic_inc_32(&rx_msg_p->ref_cnt);
2535 			if (buffer_free == B_TRUE) {
2536 				rx_msg_p->free = B_TRUE;
2537 			}
2538 
2539 			MUTEX_EXIT(&rx_rbr_p->lock);
2540 			nxge_freeb(rx_msg_p);
2541 			return;
2542 		}
2543 	}
2544 
2545 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2546 	    "==> nxge_receive_packet: DMA sync second "));
2547 
2548 	bytes_read = rcr_p->rcvd_pkt_bytes;
2549 	skip_len = sw_offset_bytes + hdr_size;
2550 	if (!rx_msg_p->rx_use_bcopy) {
2551 		/*
2552 		 * For buffers loaned up to the stack, the driver reference
2553 		 * count is incremented first; the free state is set later.
2554 		 */
2555 		if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) {
2556 			if (first_entry) {
2557 				nmp->b_rptr = &nmp->b_rptr[skip_len];
2558 				if (l2_len < bsize - skip_len) {
2559 					nmp->b_wptr = &nmp->b_rptr[l2_len];
2560 				} else {
2561 					nmp->b_wptr = &nmp->b_rptr[bsize
2562 					    - skip_len];
2563 				}
2564 			} else {
2565 				if (l2_len - bytes_read < bsize) {
2566 					nmp->b_wptr =
2567 					    &nmp->b_rptr[l2_len - bytes_read];
2568 				} else {
2569 					nmp->b_wptr = &nmp->b_rptr[bsize];
2570 				}
2571 			}
2572 		}
2573 	} else {
2574 		if (first_entry) {
2575 			nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len,
2576 			    l2_len < bsize - skip_len ?
2577 			    l2_len : bsize - skip_len);
2578 		} else {
2579 			nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset,
2580 			    l2_len - bytes_read < bsize ?
2581 			    l2_len - bytes_read : bsize);
2582 		}
2583 	}
2584 	if (nmp != NULL) {
2585 		if (first_entry) {
2586 			/*
2587 			 * Jumbo packets may be received with more than one
2588 			 * buffer, increment ipackets for the first entry only.
2589 			 */
2590 			rdc_stats->ipackets++;
2591 
2592 			/* Update ibytes for kstat. */
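			/*
			 * Note: due to C operator precedence the ternary
			 * below evaluates as
			 * ((skip_len + l2_len) < bsize) ? l2_len : bsize.
			 */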
2593 			rdc_stats->ibytes += skip_len
2594 			    + l2_len < bsize ? l2_len : bsize;
2595 			/*
2596 			 * Update the number of bytes read so far for the
2597 			 * current frame.
2598 			 */
2599 			bytes_read  = nmp->b_wptr - nmp->b_rptr;
2600 		} else {
2601 			rdc_stats->ibytes += l2_len - bytes_read < bsize ?
2602 			    l2_len - bytes_read : bsize;
2603 			bytes_read += nmp->b_wptr - nmp->b_rptr;
2604 		}
2605 
2606 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
2607 		    "==> nxge_receive_packet after dupb: "
2608 		    "rbr consumed %d "
2609 		    "pktbufsz_type %d "
2610 		    "nmp $%p rptr $%p wptr $%p "
2611 		    "buf_offset %d bsize %d l2_len %d skip_len %d",
2612 		    rx_rbr_p->rbr_consumed,
2613 		    pktbufsz_type,
2614 		    nmp, nmp->b_rptr, nmp->b_wptr,
2615 		    buf_offset, bsize, l2_len, skip_len));
2616 	} else {
2617 		cmn_err(CE_WARN, "!nxge_receive_packet: "
2618 		    "update stats (error)");
2619 		atomic_inc_32(&rx_msg_p->ref_cnt);
2620 		if (buffer_free == B_TRUE) {
2621 			rx_msg_p->free = B_TRUE;
2622 		}
2623 		MUTEX_EXIT(&rx_rbr_p->lock);
2624 		nxge_freeb(rx_msg_p);
2625 		return;
2626 	}
2627 
2628 	if (buffer_free == B_TRUE) {
2629 		rx_msg_p->free = B_TRUE;
2630 	}
2631 
2632 	is_valid = (nmp != NULL);
2633 
2634 	rcr_p->rcvd_pkt_bytes = bytes_read;
2635 
2636 	MUTEX_EXIT(&rx_rbr_p->lock);
2637 
2638 	if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) {
2639 		atomic_inc_32(&rx_msg_p->ref_cnt);
2640 		nxge_freeb(rx_msg_p);
2641 	}
2642 
2643 	if (is_valid) {
2644 		nmp->b_cont = NULL;
2645 		if (first_entry) {
2646 			*mp = nmp;
2647 			*mp_cont = NULL;
2648 		} else {
2649 			*mp_cont = nmp;
2650 		}
2651 	}
2652 
2653 	/*
2654 	 * ERROR, FRAG and PKT_TYPE are only reported in the first entry.
2655 	 * If a packet is not fragmented and no error bit is set, then
2656 	 * L4 checksum is OK.
2657 	 */
2658 
2659 	if (is_valid && !multi) {
2660 		/*
2661 		 * If the checksum flag nxge_cksum_offload
2662 		 * is 1, both TCP and UDP packets are sent
2663 		 * up with a good checksum. If the checksum flag
2664 		 * is set to 0, checksum reporting applies to
2665 		 * TCP packets only (workaround for a hardware bug).
2666 		 * If the checksum flag nxge_cksum_offload is
2667 		 * greater than 1, hardware checksum results are
2668 		 * not reported for either TCP or UDP packets.
2669 		 */
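		/*
		 * In short:
		 *   nxge_cksum_offload == 0: report TCP checksums only
		 *   nxge_cksum_offload == 1: report TCP and UDP checksums
		 *   nxge_cksum_offload  > 1: report neither
		 */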
2670 		if (nxge_cksum_offload == 1) {
2671 			is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP ||
2672 			    pkt_type == RCR_PKT_IS_UDP) ?
2673 			    B_TRUE: B_FALSE);
2674 		} else if (!nxge_cksum_offload) {
2675 			/* TCP checksum only. */
2676 			is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ?
2677 			    B_TRUE: B_FALSE);
2678 		}
2679 
2680 		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: "
2681 		    "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d",
2682 		    is_valid, multi, is_tcp_udp, frag, error_type));
2683 
2684 		if (is_tcp_udp && !frag && !error_type) {
2685 			(void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0,
2686 			    HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0);
2687 			NXGE_DEBUG_MSG((nxgep, RX_CTL,
2688 			    "==> nxge_receive_packet: Full tcp/udp cksum "
2689 			    "is_valid 0x%x multi 0x%llx pkt %d frag %d "
2690 			    "error %d",
2691 			    is_valid, multi, is_tcp_udp, frag, error_type));
2692 		}
2693 	}
2694 
2695 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2696 	    "==> nxge_receive_packet: *mp 0x%016llx", *mp));
2697 
2698 	*multi_p = (multi == RCR_MULTI_MASK);
2699 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: "
2700 	    "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx",
2701 	    *multi_p, nmp, *mp, *mp_cont));
2702 }
2703 
2704 /*
2705  * Enable polling for a ring. The interrupt for the ring is disabled
2706  * when the next nxge interrupt arrives (see nxge_rx_intr).
2707  */
2708 int
2709 nxge_enable_poll(void *arg)
2710 {
2711 	p_nxge_ring_handle_t	ring_handle = (p_nxge_ring_handle_t)arg;
2712 	p_rx_rcr_ring_t		ringp;
2713 	p_nxge_t		nxgep;
2714 	p_nxge_ldg_t		ldgp;
2715 	uint32_t		channel;
2716 
2717 	if (ring_handle == NULL) {
2718 		ASSERT(ring_handle != NULL);
2719 		return (0);
2720 	}
2721 
2722 	nxgep = ring_handle->nxgep;
2723 	channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
2724 	ringp = nxgep->rx_rcr_rings->rcr_rings[channel];
2725 	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2726 	    "==> nxge_enable_poll: rdc %d ", ringp->rdc));
2727 	ldgp = ringp->ldgp;
2728 	if (ldgp == NULL) {
2729 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2730 		    "==> nxge_enable_poll: rdc %d NULL ldgp: no change",
2731 		    ringp->rdc));
2732 		return (0);
2733 	}
2734 
2735 	MUTEX_ENTER(&ringp->lock);
2736 	/* enable polling */
2737 	if (ringp->poll_flag == 0) {
2738 		ringp->poll_flag = 1;
2739 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2740 		    "==> nxge_enable_poll: rdc %d set poll flag to 1",
2741 		    ringp->rdc));
2742 	}
2743 
2744 	MUTEX_EXIT(&ringp->lock);
2745 	return (0);
2746 }
2747 /*
2748  * Disable polling for a ring and enable its interrupt.
2749  */
2750 int
2751 nxge_disable_poll(void *arg)
2752 {
2753 	p_nxge_ring_handle_t	ring_handle = (p_nxge_ring_handle_t)arg;
2754 	p_rx_rcr_ring_t		ringp;
2755 	p_nxge_t		nxgep;
2756 	uint32_t		channel;
2757 
2758 	if (ring_handle == NULL) {
2759 		ASSERT(ring_handle != NULL);
2760 		return (0);
2761 	}
2762 
2763 	nxgep = ring_handle->nxgep;
2764 	channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
2765 	ringp = nxgep->rx_rcr_rings->rcr_rings[channel];
2766 
2767 	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2768 	    "==> nxge_disable_poll: rdc %d poll_flag %d", ringp->rdc, ringp->poll_flag));
2769 
2770 	MUTEX_ENTER(&ringp->lock);
2771 
2772 	/* disable polling: enable interrupt */
2773 	if (ringp->poll_flag) {
2774 		npi_handle_t		handle;
2775 		rx_dma_ctl_stat_t	cs;
2776 		uint8_t			channel;
2777 		p_nxge_ldg_t		ldgp;
2778 
2779 		/*
2780 		 * Get the control and status for this channel.
2781 		 */
2782 		handle = NXGE_DEV_NPI_HANDLE(nxgep);
2783 		channel = ringp->rdc;
2784 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG,
2785 		    channel, &cs.value);
2786 
2787 		/*
2788 		 * Enable the mailbox update.
2789 		 * Since no packets were read here and the hardware uses
2790 		 * the pktread and ptrread fields to update the queue
2791 		 * length, both fields must be set to 0.
2792 		 */
2793 		cs.bits.ldw.pktread = 0;
2794 		cs.bits.ldw.ptrread = 0;
2795 		cs.bits.hdw.mex = 1;
2796 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2797 		    cs.value);
2798 
2799 		/*
2800 		 * Rearm this logical group if this is a single device
2801 		 * group.
2802 		 */
2803 		ldgp = ringp->ldgp;
2804 		if (ldgp == NULL) {
2805 			ringp->poll_flag = 0;
2806 			MUTEX_EXIT(&ringp->lock);
2807 			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2808 			    "==> nxge_disable_poll: no ldgp rdc %d "
2809 			    "(still set poll to 0)", ringp->rdc));
2810 			return (0);
2811 		}
2812 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2813 		    "==> nxge_disable_poll: rdc %d ldgp $%p (enable intr)",
2814 		    ringp->rdc, ldgp));
2815 		if (ldgp->nldvs == 1) {
2816 			if (isLDOMguest(nxgep)) {
2817 				ldgp->arm = B_TRUE;
2818 				nxge_hio_ldgimgn(nxgep, ldgp);
2819 			} else {
2820 				ldgimgm_t	mgm;
2821 				mgm.value = 0;
2822 				mgm.bits.ldw.arm = 1;
2823 				mgm.bits.ldw.timer = ldgp->ldg_timer;
2824 				NXGE_REG_WR64(handle,
2825 				    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
2826 				    mgm.value);
2827 			}
2828 		}
2829 		ringp->poll_flag = 0;
2830 	}
2831 
2832 	MUTEX_EXIT(&ringp->lock);
2833 	return (0);
2834 }
2835 
2836 /*
2837  * Poll 'bytes_to_pickup' bytes of message from the rx ring.
2838  */
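/*
 * The MAC layer calls this only while the ring is in polling mode
 * (poll_flag asserted); roughly bytes_to_pickup bytes of received
 * frames are returned per call.
 */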
2839 mblk_t *
2840 nxge_rx_poll(void *arg, int bytes_to_pickup)
2841 {
2842 	p_nxge_ring_handle_t	ring_handle = (p_nxge_ring_handle_t)arg;
2843 	p_rx_rcr_ring_t		rcr_p;
2844 	p_nxge_t		nxgep;
2845 	npi_handle_t		handle;
2846 	rx_dma_ctl_stat_t	cs;
2847 	mblk_t			*mblk;
2848 	p_nxge_ldv_t		ldvp;
2849 	uint32_t		channel;
2850 
2851 	nxgep = ring_handle->nxgep;
2852 
2853 	/*
2854 	 * Get the control and status for this channel.
2855 	 */
2856 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
2857 	channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
2858 	rcr_p = nxgep->rx_rcr_rings->rcr_rings[channel];
2859 	MUTEX_ENTER(&rcr_p->lock);
2860 	ASSERT(rcr_p->poll_flag == 1);
2861 
2862 	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, rcr_p->rdc, &cs.value);
2863 
2864 	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2865 	    "==> nxge_rx_poll: calling nxge_rx_pkts: rdc %d poll_flag %d",
2866 	    rcr_p->rdc, rcr_p->poll_flag));
2867 	mblk = nxge_rx_pkts(nxgep, rcr_p, cs, bytes_to_pickup);
2868 
2869 	ldvp = rcr_p->ldvp;
2870 	/* error events. */
2871 	if (ldvp && (cs.value & RX_DMA_CTL_STAT_ERROR)) {
2872 		(void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, cs);
2873 	}
2874 
2875 	MUTEX_EXIT(&rcr_p->lock);
2876 
2877 	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2878 	    "<== nxge_rx_poll: rdc %d mblk $%p", rcr_p->rdc, mblk));
2879 	return (mblk);
2880 }
2881 
2882 
2883 /*ARGSUSED*/
2884 static nxge_status_t
2885 nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs)
2886 {
2887 	p_nxge_rx_ring_stats_t	rdc_stats;
2888 	npi_handle_t		handle;
2889 	npi_status_t		rs;
2890 	boolean_t		rxchan_fatal = B_FALSE;
2891 	boolean_t		rxport_fatal = B_FALSE;
2892 	uint8_t			portn;
2893 	nxge_status_t		status = NXGE_OK;
2894 	uint32_t		error_disp_cnt = NXGE_ERROR_SHOW_MAX;
2895 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts"));
2896 
2897 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
2898 	portn = nxgep->mac.portnum;
2899 	rdc_stats = &nxgep->statsp->rdc_stats[channel];
2900 
2901 	if (cs.bits.hdw.rbr_tmout) {
2902 		rdc_stats->rx_rbr_tmout++;
2903 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2904 		    NXGE_FM_EREPORT_RDMC_RBR_TMOUT);
2905 		rxchan_fatal = B_TRUE;
2906 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2907 		    "==> nxge_rx_err_evnts: rx_rbr_timeout"));
2908 	}
2909 	if (cs.bits.hdw.rsp_cnt_err) {
2910 		rdc_stats->rsp_cnt_err++;
2911 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2912 		    NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR);
2913 		rxchan_fatal = B_TRUE;
2914 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2915 		    "==> nxge_rx_err_evnts(channel %d): "
2916 		    "rsp_cnt_err", channel));
2917 	}
2918 	if (cs.bits.hdw.byte_en_bus) {
2919 		rdc_stats->byte_en_bus++;
2920 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2921 		    NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS);
2922 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2923 		    "==> nxge_rx_err_evnts(channel %d): "
2924 		    "fatal error: byte_en_bus", channel));
2925 		rxchan_fatal = B_TRUE;
2926 	}
2927 	if (cs.bits.hdw.rsp_dat_err) {
2928 		rdc_stats->rsp_dat_err++;
2929 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2930 		    NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR);
2931 		rxchan_fatal = B_TRUE;
2932 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2933 		    "==> nxge_rx_err_evnts(channel %d): "
2934 		    "fatal error: rsp_dat_err", channel));
2935 	}
2936 	if (cs.bits.hdw.rcr_ack_err) {
2937 		rdc_stats->rcr_ack_err++;
2938 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2939 		    NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR);
2940 		rxchan_fatal = B_TRUE;
2941 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2942 		    "==> nxge_rx_err_evnts(channel %d): "
2943 		    "fatal error: rcr_ack_err", channel));
2944 	}
2945 	if (cs.bits.hdw.dc_fifo_err) {
2946 		rdc_stats->dc_fifo_err++;
2947 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2948 		    NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR);
2949 		/* Not fatal for the channel, but fatal for the port. */
2950 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2951 		    "==> nxge_rx_err_evnts(channel %d): "
2952 		    "dc_fifo_err", channel));
2953 		rxport_fatal = B_TRUE;
2954 	}
2955 	if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) {
2956 		if ((rs = npi_rxdma_ring_perr_stat_get(handle,
2957 		    &rdc_stats->errlog.pre_par,
2958 		    &rdc_stats->errlog.sha_par))
2959 		    != NPI_SUCCESS) {
2960 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2961 			    "==> nxge_rx_err_evnts(channel %d): "
2962 			    "rcr_sha_par: get perr", channel));
2963 			return (NXGE_ERROR | rs);
2964 		}
2965 		if (cs.bits.hdw.rcr_sha_par) {
2966 			rdc_stats->rcr_sha_par++;
2967 			NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2968 			    NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR);
2969 			rxchan_fatal = B_TRUE;
2970 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2971 			    "==> nxge_rx_err_evnts(channel %d): "
2972 			    "fatal error: rcr_sha_par", channel));
2973 		}
2974 		if (cs.bits.hdw.rbr_pre_par) {
2975 			rdc_stats->rbr_pre_par++;
2976 			NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2977 			    NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR);
2978 			rxchan_fatal = B_TRUE;
2979 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2980 			    "==> nxge_rx_err_evnts(channel %d): "
2981 			    "fatal error: rbr_pre_par", channel));
2982 		}
2983 	}
2984 	/*
2985 	 * The following 4 status bits are informational; the system
2986 	 * is running fine. There is no need to send FMA ereports or
2987 	 * log messages.
2988 	 */
2989 	if (cs.bits.hdw.port_drop_pkt) {
2990 		rdc_stats->port_drop_pkt++;
2991 	}
2992 	if (cs.bits.hdw.wred_drop) {
2993 		rdc_stats->wred_drop++;
2994 	}
2995 	if (cs.bits.hdw.rbr_pre_empty) {
2996 		rdc_stats->rbr_pre_empty++;
2997 	}
2998 	if (cs.bits.hdw.rcr_shadow_full) {
2999 		rdc_stats->rcr_shadow_full++;
3000 	}
3001 	if (cs.bits.hdw.config_err) {
3002 		rdc_stats->config_err++;
3003 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3004 		    NXGE_FM_EREPORT_RDMC_CONFIG_ERR);
3005 		rxchan_fatal = B_TRUE;
3006 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3007 		    "==> nxge_rx_err_evnts(channel %d): "
3008 		    "config error", channel));
3009 	}
3010 	if (cs.bits.hdw.rcrincon) {
3011 		rdc_stats->rcrincon++;
3012 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3013 		    NXGE_FM_EREPORT_RDMC_RCRINCON);
3014 		rxchan_fatal = B_TRUE;
3015 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3016 		    "==> nxge_rx_err_evnts(channel %d): "
3017 		    "fatal error: rcrincon error", channel));
3018 	}
3019 	if (cs.bits.hdw.rcrfull) {
3020 		rdc_stats->rcrfull++;
3021 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3022 		    NXGE_FM_EREPORT_RDMC_RCRFULL);
3023 		rxchan_fatal = B_TRUE;
3024 		if (rdc_stats->rcrfull < error_disp_cnt)
3025 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3026 			    "==> nxge_rx_err_evnts(channel %d): "
3027 			    "fatal error: rcrfull error", channel));
3028 	}
3029 	if (cs.bits.hdw.rbr_empty) {
3030 		/*
3031 		 * This bit is informational; there is no need to
3032 		 * send an FMA ereport or log a message.
3033 		 */
3034 		rdc_stats->rbr_empty++;
3035 	}
3036 	if (cs.bits.hdw.rbrfull) {
3037 		rdc_stats->rbrfull++;
3038 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3039 		    NXGE_FM_EREPORT_RDMC_RBRFULL);
3040 		rxchan_fatal = B_TRUE;
3041 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3042 		    "==> nxge_rx_err_evnts(channel %d): "
3043 		    "fatal error: rbr_full error", channel));
3044 	}
3045 	if (cs.bits.hdw.rbrlogpage) {
3046 		rdc_stats->rbrlogpage++;
3047 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3048 		    NXGE_FM_EREPORT_RDMC_RBRLOGPAGE);
3049 		rxchan_fatal = B_TRUE;
3050 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3051 		    "==> nxge_rx_err_evnts(channel %d): "
3052 		    "fatal error: rbr logical page error", channel));
3053 	}
3054 	if (cs.bits.hdw.cfiglogpage) {
3055 		rdc_stats->cfiglogpage++;
3056 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3057 		    NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE);
3058 		rxchan_fatal = B_TRUE;
3059 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3060 		    "==> nxge_rx_err_evnts(channel %d): "
3061 		    "fatal error: cfig logical page error", channel));
3062 	}
3063 
3064 	if (rxport_fatal)  {
3065 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3066 		    " nxge_rx_err_evnts: fatal error on Port #%d\n",
3067 		    portn));
3068 		if (isLDOMguest(nxgep)) {
3069 			status = NXGE_ERROR;
3070 		} else {
3071 			status = nxge_ipp_fatal_err_recover(nxgep);
3072 			if (status == NXGE_OK) {
3073 				FM_SERVICE_RESTORED(nxgep);
3074 			}
3075 		}
3076 	}
3077 
3078 	if (rxchan_fatal) {
3079 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3080 		    " nxge_rx_err_evnts: fatal error on Channel #%d\n",
3081 		    channel));
3082 		if (isLDOMguest(nxgep)) {
3083 			status = NXGE_ERROR;
3084 		} else {
3085 			status = nxge_rxdma_fatal_err_recover(nxgep, channel);
3086 			if (status == NXGE_OK) {
3087 				FM_SERVICE_RESTORED(nxgep);
3088 			}
3089 		}
3090 	}
3091 
3092 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts"));
3093 
3094 	return (status);
3095 }
3096 
3097 /*
3098  * nxge_rdc_hvio_setup
3099  *
3100  *	This code appears to set up some Hypervisor variables.
3101  *
3102  * Arguments:
3103  * 	nxgep
3104  * 	channel
3105  *
3106  * Notes:
3107  *	What does NIU_LP_WORKAROUND mean?
3108  *
3109  * NPI/NXGE function calls:
3110  *	na
3111  *
3112  * Context:
3113  *	Any domain
3114  */
3115 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3116 static void
3117 nxge_rdc_hvio_setup(
3118 	nxge_t *nxgep, int channel)
3119 {
3120 	nxge_dma_common_t	*dma_common;
3121 	nxge_dma_common_t	*dma_control;
3122 	rx_rbr_ring_t		*ring;
3123 
3124 	ring = nxgep->rx_rbr_rings->rbr_rings[channel];
3125 	dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
3126 
3127 	ring->hv_set = B_FALSE;
3128 
3129 	ring->hv_rx_buf_base_ioaddr_pp = (uint64_t)
3130 	    dma_common->orig_ioaddr_pp;
3131 	ring->hv_rx_buf_ioaddr_size = (uint64_t)
3132 	    dma_common->orig_alength;
3133 
3134 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rdc_hvio_setup: "
3135 	    "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)",
3136 	    channel, ring->hv_rx_buf_base_ioaddr_pp,
3137 	    dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size,
3138 	    dma_common->orig_alength, dma_common->orig_alength));
3139 
3140 	dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
3141 
3142 	ring->hv_rx_cntl_base_ioaddr_pp =
3143 	    (uint64_t)dma_control->orig_ioaddr_pp;
3144 	ring->hv_rx_cntl_ioaddr_size =
3145 	    (uint64_t)dma_control->orig_alength;
3146 
3147 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rdc_hvio_setup: "
3148 	    "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)",
3149 	    channel, ring->hv_rx_cntl_base_ioaddr_pp,
3150 	    dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size,
3151 	    dma_control->orig_alength, dma_control->orig_alength));
3152 }
3153 #endif
3154 
3155 /*
3156  * nxge_map_rxdma
3157  *
3158  *	Map an RDC into our kernel space.
3159  *
3160  * Arguments:
3161  * 	nxgep
3162  * 	channel	The channel to map.
3163  *
3164  * Notes:
3165  *	1. Allocate & initialise a memory pool, if necessary.
3166  *	2. Allocate however many receive buffers are required.
3167  *	3. Setup buffers, descriptors, and mailbox.
3168  *
3169  * NPI/NXGE function calls:
3170  *	nxge_alloc_rx_mem_pool()
3171  *	nxge_alloc_rbb()
3172  *	nxge_map_rxdma_channel()
3173  *
3174  * Registers accessed:
3175  *
3176  * Context:
3177  *	Any domain
3178  */
3179 static nxge_status_t
3180 nxge_map_rxdma(p_nxge_t nxgep, int channel)
3181 {
3182 	nxge_dma_common_t	**data;
3183 	nxge_dma_common_t	**control;
3184 	rx_rbr_ring_t		**rbr_ring;
3185 	rx_rcr_ring_t		**rcr_ring;
3186 	rx_mbox_t		**mailbox;
3187 	uint32_t		chunks;
3188 
3189 	nxge_status_t		status;
3190 
3191 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma"));
3192 
3193 	if (!nxgep->rx_buf_pool_p) {
3194 		if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) {
3195 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3196 			    "<== nxge_map_rxdma: buf not allocated"));
3197 			return (NXGE_ERROR);
3198 		}
3199 	}
3200 
3201 	if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK)
3202 		return (NXGE_ERROR);
3203 
3204 	/*
3205 	 * Map descriptors from the buffer pools for each DMA channel.
3206 	 */
3207 
3208 	/*
3209 	 * Set up and prepare buffer blocks, descriptors
3210 	 * and mailbox.
3211 	 */
3212 	data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
3213 	rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel];
3214 	chunks = nxgep->rx_buf_pool_p->num_chunks[channel];
3215 
3216 	control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
3217 	rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel];
3218 
3219 	mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel];
3220 
3221 	status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring,
3222 	    chunks, control, rcr_ring, mailbox);
3223 	if (status != NXGE_OK) {
3224 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3225 		    "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) "
3226 		    "returned 0x%x",
3227 		    channel, status));
3228 		return (status);
3229 	}
3230 	nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel;
3231 	nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel;
3232 	nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats =
3233 	    &nxgep->statsp->rdc_stats[channel];
3234 
3235 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
3236 	if (!isLDOMguest(nxgep))
3237 		nxge_rdc_hvio_setup(nxgep, channel);
3238 #endif
3239 
3240 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3241 	    "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel));
3242 
3243 	return (status);
3244 }
3245 
3246 static void
3247 nxge_unmap_rxdma(p_nxge_t nxgep, int channel)
3248 {
3249 	rx_rbr_ring_t	*rbr_ring;
3250 	rx_rcr_ring_t	*rcr_ring;
3251 	rx_mbox_t	*mailbox;
3252 
3253 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel));
3254 
3255 	if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings ||
3256 	    !nxgep->rx_mbox_areas_p)
3257 		return;
3258 
3259 	rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel];
3260 	rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel];
3261 	mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel];
3262 
3263 	if (!rbr_ring || !rcr_ring || !mailbox)
3264 		return;
3265 
3266 	(void) nxge_unmap_rxdma_channel(
3267 	    nxgep, channel, rbr_ring, rcr_ring, mailbox);
3268 
3269 	nxge_free_rxb(nxgep, channel);
3270 
3271 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma"));
3272 }
3273 
3274 nxge_status_t
3275 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
3276     p_nxge_dma_common_t *dma_buf_p,  p_rx_rbr_ring_t *rbr_p,
3277     uint32_t num_chunks,
3278     p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p,
3279     p_rx_mbox_t *rx_mbox_p)
3280 {
3281 	int	status = NXGE_OK;
3282 
3283 	/*
3284 	 * Set up and prepare buffer blocks, descriptors
3285 	 * and mailbox.
3286 	 */
3287 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3288 	    "==> nxge_map_rxdma_channel (channel %d)", channel));
3289 	/*
3290 	 * Receive buffer blocks
3291 	 */
3292 	status = nxge_map_rxdma_channel_buf_ring(nxgep, channel,
3293 	    dma_buf_p, rbr_p, num_chunks);
3294 	if (status != NXGE_OK) {
3295 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3296 		    "==> nxge_map_rxdma_channel (channel %d): "
3297 		    "map buffer failed 0x%x", channel, status));
3298 		goto nxge_map_rxdma_channel_exit;
3299 	}
3300 
3301 	/*
3302 	 * Receive block ring, completion ring and mailbox.
3303 	 */
3304 	status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel,
3305 	    dma_cntl_p, rbr_p, rcr_p, rx_mbox_p);
3306 	if (status != NXGE_OK) {
3307 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3308 		    "==> nxge_map_rxdma_channel (channel %d): "
3309 		    "map config failed 0x%x", channel, status));
3310 		goto nxge_map_rxdma_channel_fail2;
3311 	}
3312 
3313 	goto nxge_map_rxdma_channel_exit;
3314 
3315 nxge_map_rxdma_channel_fail3:
3316 	/* Free rbr, rcr */
3317 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3318 	    "==> nxge_map_rxdma_channel: free rbr/rcr "
3319 	    "(status 0x%x channel %d)",
3320 	    status, channel));
3321 	nxge_unmap_rxdma_channel_cfg_ring(nxgep,
3322 	    *rcr_p, *rx_mbox_p);
3323 
3324 nxge_map_rxdma_channel_fail2:
3325 	/* Free buffer blocks */
3326 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3327 	    "==> nxge_map_rxdma_channel: free rx buffers "
3328 	    "(nxgep 0x%x status 0x%x channel %d)",
3329 	    nxgep, status, channel));
3330 	nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p);
3331 
3332 	status = NXGE_ERROR;
3333 
3334 nxge_map_rxdma_channel_exit:
3335 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3336 	    "<== nxge_map_rxdma_channel: "
3337 	    "(nxgep 0x%x status 0x%x channel %d)",
3338 	    nxgep, status, channel));
3339 
3340 	return (status);
3341 }
3342 
3343 /*ARGSUSED*/
3344 static void
3345 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
3346     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
3347 {
3348 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3349 	    "==> nxge_unmap_rxdma_channel (channel %d)", channel));
3350 
3351 	/*
3352 	 * unmap receive block ring, completion ring and mailbox.
3353 	 */
3354 	(void) nxge_unmap_rxdma_channel_cfg_ring(nxgep,
3355 	    rcr_p, rx_mbox_p);
3356 
3357 	/* unmap buffer blocks */
3358 	(void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p);
3359 
3360 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel"));
3361 }
3362 
3363 /*ARGSUSED*/
3364 static nxge_status_t
3365 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
3366     p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p,
3367     p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
3368 {
3369 	p_rx_rbr_ring_t 	rbrp;
3370 	p_rx_rcr_ring_t 	rcrp;
3371 	p_rx_mbox_t 		mboxp;
3372 	p_nxge_dma_common_t 	cntl_dmap;
3373 	p_nxge_dma_common_t 	dmap;
3374 	p_rx_msg_t 		*rx_msg_ring;
3375 	p_rx_msg_t 		rx_msg_p;
3376 	p_rbr_cfig_a_t		rcfga_p;
3377 	p_rbr_cfig_b_t		rcfgb_p;
3378 	p_rcrcfig_a_t		cfga_p;
3379 	p_rcrcfig_b_t		cfgb_p;
3380 	p_rxdma_cfig1_t		cfig1_p;
3381 	p_rxdma_cfig2_t		cfig2_p;
3382 	p_rbr_kick_t		kick_p;
3383 	uint32_t		dmaaddrp;
3384 	uint32_t		*rbr_vaddrp;
3385 	uint32_t		bkaddr;
3386 	nxge_status_t		status = NXGE_OK;
3387 	int			i;
3388 	uint32_t 		nxge_port_rcr_size;
3389 
3390 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3391 	    "==> nxge_map_rxdma_channel_cfg_ring"));
3392 
3393 	cntl_dmap = *dma_cntl_p;
3394 
3395 	/* Map in the receive block ring */
3396 	rbrp = *rbr_p;
3397 	dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc;
3398 	nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4);
3399 	/*
3400 	 * Zero out buffer block ring descriptors.
3401 	 */
3402 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
3403 
3404 	rcfga_p = &(rbrp->rbr_cfga);
3405 	rcfgb_p = &(rbrp->rbr_cfgb);
3406 	kick_p = &(rbrp->rbr_kick);
3407 	rcfga_p->value = 0;
3408 	rcfgb_p->value = 0;
3409 	kick_p->value = 0;
3410 	rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress;
3411 	rcfga_p->value = (rbrp->rbr_addr &
3412 	    (RBR_CFIG_A_STDADDR_MASK |
3413 	    RBR_CFIG_A_STDADDR_BASE_MASK));
3414 	rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT);
3415 
3416 	rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0;
3417 	rcfgb_p->bits.ldw.vld0 = 1;
3418 	rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1;
3419 	rcfgb_p->bits.ldw.vld1 = 1;
3420 	rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2;
3421 	rcfgb_p->bits.ldw.vld2 = 1;
3422 	rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code;
3423 
3424 	/*
3425 	 * For each buffer block, enter receive block address to the ring.
3426 	 */
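	/*
	 * Each 32-bit RBR entry holds the buffer block's DMA address
	 * shifted right by RBR_BKADDR_SHIFT; the discarded low-order
	 * bits are assumed to be zero due to block alignment.
	 */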
3427 	rbr_vaddrp = (uint32_t *)dmap->kaddrp;
3428 	rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp;
3429 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3430 	    "==> nxge_map_rxdma_channel_cfg_ring: channel %d "
3431 	    "rbr_vaddrp $%p", dma_channel, rbr_vaddrp));
3432 
3433 	rx_msg_ring = rbrp->rx_msg_ring;
3434 	for (i = 0; i < rbrp->tnblocks; i++) {
3435 		rx_msg_p = rx_msg_ring[i];
3436 		rx_msg_p->nxgep = nxgep;
3437 		rx_msg_p->rx_rbr_p = rbrp;
3438 		bkaddr = (uint32_t)
3439 		    ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress
3440 		    >> RBR_BKADDR_SHIFT));
3441 		rx_msg_p->free = B_FALSE;
3442 		rx_msg_p->max_usage_cnt = 0xbaddcafe;
3443 
3444 		*rbr_vaddrp++ = bkaddr;
3445 	}
3446 
3447 	kick_p->bits.ldw.bkadd = rbrp->rbb_max;
3448 	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
3449 
3450 	rbrp->rbr_rd_index = 0;
3451 
3452 	rbrp->rbr_consumed = 0;
3453 	rbrp->rbr_use_bcopy = B_TRUE;
3454 	rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0;
3455 	/*
3456 	 * Do bcopy on packets greater than bcopy size once
3457 	 * the lo threshold is reached.
3458 	 * This lo threshold should be less than the hi threshold.
3459 	 *
3460 	 * Do bcopy on every packet once the hi threshold is reached.
3461 	 */
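	/*
	 * For example (assuming NXGE_RX_BCOPY_SCALE is 8 and
	 * NXGE_RX_COPY_6 has the value 6): with rbb_max = 4096 and
	 * nxge_rx_threshold_hi = NXGE_RX_COPY_6, the hi threshold
	 * below works out to 4096 * 6 / 8 = 3072 consumed blocks.
	 */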
3462 	if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) {
3463 		/* default it to use hi */
3464 		nxge_rx_threshold_lo = nxge_rx_threshold_hi;
3465 	}
3466 
3467 	if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) {
3468 		nxge_rx_buf_size_type = NXGE_RBR_TYPE2;
3469 	}
3470 	rbrp->rbr_bufsize_type = nxge_rx_buf_size_type;
3471 
3472 	switch (nxge_rx_threshold_hi) {
3473 	default:
3474 	case	NXGE_RX_COPY_NONE:
3475 		/* Do not do bcopy at all */
3476 		rbrp->rbr_use_bcopy = B_FALSE;
3477 		rbrp->rbr_threshold_hi = rbrp->rbb_max;
3478 		break;
3479 
3480 	case NXGE_RX_COPY_1:
3481 	case NXGE_RX_COPY_2:
3482 	case NXGE_RX_COPY_3:
3483 	case NXGE_RX_COPY_4:
3484 	case NXGE_RX_COPY_5:
3485 	case NXGE_RX_COPY_6:
3486 	case NXGE_RX_COPY_7:
3487 		rbrp->rbr_threshold_hi =
3488 		    rbrp->rbb_max *
3489 		    (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE;
3490 		break;
3491 
3492 	case NXGE_RX_COPY_ALL:
3493 		rbrp->rbr_threshold_hi = 0;
3494 		break;
3495 	}
3496 
3497 	switch (nxge_rx_threshold_lo) {
3498 	default:
3499 	case	NXGE_RX_COPY_NONE:
3500 		/* Do not do bcopy at all */
3501 		if (rbrp->rbr_use_bcopy) {
3502 			rbrp->rbr_use_bcopy = B_FALSE;
3503 		}
3504 		rbrp->rbr_threshold_lo = rbrp->rbb_max;
3505 		break;
3506 
3507 	case NXGE_RX_COPY_1:
3508 	case NXGE_RX_COPY_2:
3509 	case NXGE_RX_COPY_3:
3510 	case NXGE_RX_COPY_4:
3511 	case NXGE_RX_COPY_5:
3512 	case NXGE_RX_COPY_6:
3513 	case NXGE_RX_COPY_7:
3514 		rbrp->rbr_threshold_lo =
3515 		    rbrp->rbb_max *
3516 		    (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE;
3517 		break;
3518 
3519 	case NXGE_RX_COPY_ALL:
3520 		rbrp->rbr_threshold_lo = 0;
3521 		break;
3522 	}
3523 
3524 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
3525 	    "nxge_map_rxdma_channel_cfg_ring: channel %d "
3526 	    "rbb_max %d "
3527 	    "rbrp->rbr_bufsize_type %d "
3528 	    "rbb_threshold_hi %d "
3529 	    "rbb_threshold_lo %d",
3530 	    dma_channel,
3531 	    rbrp->rbb_max,
3532 	    rbrp->rbr_bufsize_type,
3533 	    rbrp->rbr_threshold_hi,
3534 	    rbrp->rbr_threshold_lo));
3535 
3536 	rbrp->page_valid.value = 0;
3537 	rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0;
3538 	rbrp->page_value_1.value = rbrp->page_value_2.value = 0;
3539 	rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0;
3540 	rbrp->page_hdl.value = 0;
3541 
3542 	rbrp->page_valid.bits.ldw.page0 = 1;
3543 	rbrp->page_valid.bits.ldw.page1 = 1;
3544 
3545 	/* Map in the receive completion ring */
3546 	rcrp = (p_rx_rcr_ring_t)
3547 	    KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP);
3548 	rcrp->rdc = dma_channel;
3549 
3550 	nxge_port_rcr_size = nxgep->nxge_port_rcr_size;
3551 	rcrp->comp_size = nxge_port_rcr_size;
3552 	rcrp->comp_wrap_mask = nxge_port_rcr_size - 1;
3553 
3554 	rcrp->max_receive_pkts = nxge_max_rx_pkts;
3555 
3556 	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
3557 	nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size,
3558 	    sizeof (rcr_entry_t));
3559 	rcrp->comp_rd_index = 0;
3560 	rcrp->comp_wt_index = 0;
3561 	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
3562 	    (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
3563 #if defined(__i386)
3564 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
3565 	    (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
3566 #else
3567 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
3568 	    (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
3569 #endif
3570 
3571 	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
3572 	    (nxge_port_rcr_size - 1);
3573 	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
3574 	    (nxge_port_rcr_size - 1);
3575 
3576 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3577 	    "==> nxge_map_rxdma_channel_cfg_ring: "
3578 	    "channel %d "
3579 	    "rbr_vaddrp $%p "
3580 	    "rcr_desc_rd_head_p $%p "
3581 	    "rcr_desc_rd_head_pp $%p "
3582 	    "rcr_desc_rd_last_p $%p "
3583 	    "rcr_desc_rd_last_pp $%p ",
3584 	    dma_channel,
3585 	    rbr_vaddrp,
3586 	    rcrp->rcr_desc_rd_head_p,
3587 	    rcrp->rcr_desc_rd_head_pp,
3588 	    rcrp->rcr_desc_last_p,
3589 	    rcrp->rcr_desc_last_pp));
3590 
3591 	/*
3592 	 * Zero out receive completion ring descriptors.
3593 	 */
3594 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
3595 
3596 	rcrp->intr_timeout = (nxgep->intr_timeout <
3597 	    NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN :
3598 	    nxgep->intr_timeout;
3599 
3600 	rcrp->intr_threshold = (nxgep->intr_threshold <
3601 	    NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN :
3602 	    nxgep->intr_threshold;
3603 
3604 	rcrp->full_hdr_flag = B_FALSE;
3605 	rcrp->sw_priv_hdr_len = 0;
3606 
3607 	cfga_p = &(rcrp->rcr_cfga);
3608 	cfgb_p = &(rcrp->rcr_cfgb);
3609 	cfga_p->value = 0;
3610 	cfgb_p->value = 0;
3611 	rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress;
3612 	cfga_p->value = (rcrp->rcr_addr &
3613 	    (RCRCFIG_A_STADDR_MASK |
3614 	    RCRCFIG_A_STADDR_BASE_MASK));
3615 
3616 	cfga_p->value |= ((uint64_t)rcrp->comp_size <<
3617 	    RCRCFIG_A_LEN_SHIF);
3618 
3619 	/*
3620 	 * Timeout should be set based on the system clock divider.
3621 	 * A timeout value of 1 assumes that the
3622 	 * granularity (1000) is 3 microseconds running at 300MHz.
3623 	 */
3624 	cfgb_p->bits.ldw.pthres = rcrp->intr_threshold;
3625 	cfgb_p->bits.ldw.timeout = rcrp->intr_timeout;
3626 	cfgb_p->bits.ldw.entout = 1;
3627 
3628 	/* Map in the mailbox */
3629 	mboxp = (p_rx_mbox_t)
3630 	    KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP);
3631 	dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox;
3632 	nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t));
3633 	cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1;
3634 	cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2;
3635 	cfig1_p->value = cfig2_p->value = 0;
3636 
3637 	mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress;
3638 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3639 	    "==> nxge_map_rxdma_channel_cfg_ring: "
3640 	    "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx",
3641 	    dma_channel, cfig1_p->value, cfig2_p->value,
3642 	    mboxp->mbox_addr));
3643 
3644 	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32
3645 	    & 0xfff);
3646 	cfig1_p->bits.ldw.mbaddr_h = dmaaddrp;
3647 
3648 
3650 	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress &
3651 	    RXDMA_CFIG2_MBADDR_L_MASK);
3652 
3653 	cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT);
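	/*
	 * Sketch of the mailbox address programming above, inferred
	 * from the masks used here rather than from the PRM: the
	 * mailbox DMA cookie is split into a high part (bits 43:32,
	 * the 0xfff mask written to cfig1.mbaddr_h) and a low part
	 * (the bits selected by RXDMA_CFIG2_MBADDR_L_MASK, shifted
	 * down before being written to cfig2.mbaddr).  For example, a
	 * cookie of 0x712345000 would program mbaddr_h = 0x7 and a low
	 * field derived from 0x12345000.
	 */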
3654 
3655 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3656 	    "==> nxge_map_rxdma_channel_cfg_ring: "
3657 	    "channel %d dmaaddrp $%p "
3658 	    "cfg1 0x%016llx cfig2 0x%016llx",
3659 	    dma_channel, dmaaddrp,
3660 	    cfig1_p->value, cfig2_p->value));
3661 
3662 	cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag;
3663 	cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len;
3664 
3665 	rbrp->rx_rcr_p = rcrp;
3666 	rcrp->rx_rbr_p = rbrp;
3667 	*rcr_p = rcrp;
3668 	*rx_mbox_p = mboxp;
3669 
3670 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3671 	    "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status));
3672 
3673 	return (status);
3674 }
3675 
3676 /*ARGSUSED*/
3677 static void
3678 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep,
3679     p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
3680 {
3681 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3682 	    "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d",
3683 	    rcr_p->rdc));
3684 
3685 	KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t));
3686 	KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t));
3687 
3688 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3689 	    "<== nxge_unmap_rxdma_channel_cfg_ring"));
3690 }
3691 
3692 static nxge_status_t
3693 nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
3694     p_nxge_dma_common_t *dma_buf_p,
3695     p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks)
3696 {
3697 	p_rx_rbr_ring_t 	rbrp;
3698 	p_nxge_dma_common_t 	dma_bufp, tmp_bufp;
3699 	p_rx_msg_t 		*rx_msg_ring;
3700 	p_rx_msg_t 		rx_msg_p;
3701 	p_mblk_t 		mblk_p;
3702 
3703 	rxring_info_t *ring_info;
3704 	nxge_status_t		status = NXGE_OK;
3705 	int			i, j, index;
3706 	uint32_t		size, bsize, nblocks, nmsgs;
3707 
3708 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3709 	    "==> nxge_map_rxdma_channel_buf_ring: channel %d",
3710 	    channel));
3711 
3712 	dma_bufp = tmp_bufp = *dma_buf_p;
3713 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3714 	    " nxge_map_rxdma_channel_buf_ring: channel %d to map %d "
3715 	    "chunks bufp 0x%016llx",
3716 	    channel, num_chunks, dma_bufp));
3717 
3718 	nmsgs = 0;
3719 	for (i = 0; i < num_chunks; i++, tmp_bufp++) {
3720 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3721 		    "==> nxge_map_rxdma_channel_buf_ring: channel %d "
3722 		    "bufp 0x%016llx nblocks %d nmsgs %d",
3723 		    channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
3724 		nmsgs += tmp_bufp->nblocks;
3725 	}
3726 	if (!nmsgs) {
3727 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3728 		    "<== nxge_map_rxdma_channel_buf_ring: channel %d "
3729 		    "no msg blocks",
3730 		    channel));
3731 		status = NXGE_ERROR;
3732 		goto nxge_map_rxdma_channel_buf_ring_exit;
3733 	}
3734 
3735 	rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP);
3736 
3737 	size = nmsgs * sizeof (p_rx_msg_t);
3738 	rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
3739 	ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t),
3740 	    KM_SLEEP);
3741 
3742 	MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER,
3743 	    (void *)nxgep->interrupt_cookie);
3744 	MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER,
3745 	    (void *)nxgep->interrupt_cookie);
3746 	rbrp->rdc = channel;
3747 	rbrp->num_blocks = num_chunks;
3748 	rbrp->tnblocks = nmsgs;
3749 	rbrp->rbb_max = nmsgs;
3750 	rbrp->rbr_max_size = nmsgs;
3751 	rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1);
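	/*
	 * Note: the wrap mask computed above is only valid if rbb_max
	 * (the total number of buffer blocks) is a power of two; the
	 * buffer allocation code is assumed to guarantee that.
	 */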
3752 
3753 	/*
3754 	 * Buffer sizes suggested by the NIU architect:
3755 	 * 256B, 1KB and 2KB (4KB or 8KB when jumbo frames are enabled).
3756 	 */
3757 
3758 	rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B;
3759 	rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES;
3760 	rbrp->npi_pkt_buf_size0 = SIZE_256B;
3761 
3762 	rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K;
3763 	rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES;
3764 	rbrp->npi_pkt_buf_size1 = SIZE_1KB;
3765 
3766 	rbrp->block_size = nxgep->rx_default_block_size;
3767 
3768 	if (!nxgep->mac.is_jumbo) {
3769 		rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K;
3770 		rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES;
3771 		rbrp->npi_pkt_buf_size2 = SIZE_2KB;
3772 	} else {
3773 		if (rbrp->block_size >= 0x2000) {
3774 			rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K;
3775 			rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES;
3776 			rbrp->npi_pkt_buf_size2 = SIZE_8KB;
3777 		} else {
3778 			rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K;
3779 			rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES;
3780 			rbrp->npi_pkt_buf_size2 = SIZE_4KB;
3781 		}
3782 	}
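	/*
	 * Summary of the three RBR packet buffer classes configured
	 * above: size0 = 256B and size1 = 1KB always, while size2 is
	 * 2KB for a standard MTU, or 4KB/8KB with jumbo frames enabled
	 * (8KB only when the default block size is at least 8KB).
	 */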
3783 
3784 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3785 	    "==> nxge_map_rxdma_channel_buf_ring: channel %d "
3786 	    "actual rbr max %d rbb_max %d nmsgs %d "
3787 	    "rbrp->block_size %d default_block_size %d "
3788 	    "(config nxge_rbr_size %d nxge_rbr_spare_size %d)",
3789 	    channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs,
3790 	    rbrp->block_size, nxgep->rx_default_block_size,
3791 	    nxge_rbr_size, nxge_rbr_spare_size));
3792 
3793 	/* Map in buffers from the buffer pool.  */
3794 	index = 0;
3795 	for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) {
3796 		bsize = dma_bufp->block_size;
3797 		nblocks = dma_bufp->nblocks;
3798 #if defined(__i386)
3799 		ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp;
3800 #else
3801 		ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp;
3802 #endif
3803 		ring_info->buffer[i].buf_index = i;
3804 		ring_info->buffer[i].buf_size = dma_bufp->alength;
3805 		ring_info->buffer[i].start_index = index;
3806 #if defined(__i386)
3807 		ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp;
3808 #else
3809 		ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp;
3810 #endif
3811 
3812 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3813 		    " nxge_map_rxdma_channel_buf_ring: map channel %d "
3814 		    "chunk %d"
3815 		    " nblocks %d chunk_size %x block_size 0x%x "
3816 		    "dma_bufp $%p", channel, i,
3817 		    dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize,
3818 		    dma_bufp));
3819 
3820 		for (j = 0; j < nblocks; j++) {
3821 			if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO,
3822 			    dma_bufp)) == NULL) {
3823 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3824 				    "allocb failed (index %d i %d j %d)",
3825 				    index, i, j));
3826 				goto nxge_map_rxdma_channel_buf_ring_fail1;
3827 			}
3828 			rx_msg_ring[index] = rx_msg_p;
3829 			rx_msg_p->block_index = index;
3830 			rx_msg_p->shifted_addr = (uint32_t)
3831 			    ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >>
3832 			    RBR_BKADDR_SHIFT));
3833 
3834 			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3835 			    "index %d j %d rx_msg_p $%p mblk %p",
3836 			    index, j, rx_msg_p, rx_msg_p->rx_mblk_p));
3837 
3838 			mblk_p = rx_msg_p->rx_mblk_p;
3839 			mblk_p->b_wptr = mblk_p->b_rptr + bsize;
3840 
3841 			rbrp->rbr_ref_cnt++;
3842 			index++;
3843 			rx_msg_p->buf_dma.dma_channel = channel;
3844 		}
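		/*
		 * Each nxge_allocb() call above bound an mblk to one
		 * block of this chunk; rbr_ref_cnt counts those blocks
		 * so that the ring structure is not freed while any of
		 * them is still loaned upstream (see the RBR_UNMAPPED
		 * handling in nxge_unmap_rxdma_channel_buf_ring()).
		 */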
3845 
3846 		rbrp->rbr_alloc_type = DDI_MEM_ALLOC;
3847 		if (dma_bufp->contig_alloc_type) {
3848 			rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC;
3849 		}
3850 
3851 		if (dma_bufp->kmem_alloc_type) {
3852 			rbrp->rbr_alloc_type = KMEM_ALLOC;
3853 		}
3854 
3855 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3856 		    " nxge_map_rxdma_channel_buf_ring: map channel %d "
3857 		    "chunk %d"
3858 		    " nblocks %d chunk_size %x block_size 0x%x "
3859 		    "dma_bufp $%p",
3860 		    channel, i,
3861 		    dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize,
3862 		    dma_bufp));
3863 	}
3864 	if (i < rbrp->num_blocks) {
3865 		goto nxge_map_rxdma_channel_buf_ring_fail1;
3866 	}
3867 
3868 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3869 	    "nxge_map_rxdma_channel_buf_ring: done buf init "
3870 	    "channel %d msg block entries %d",
3871 	    channel, index));
3872 	ring_info->block_size_mask = bsize - 1;
3873 	rbrp->rx_msg_ring = rx_msg_ring;
3874 	rbrp->dma_bufp = dma_buf_p;
3875 	rbrp->ring_info = ring_info;
3876 
3877 	status = nxge_rxbuf_index_info_init(nxgep, rbrp);
3878 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3879 	    " nxge_map_rxdma_channel_buf_ring: "
3880 	    "channel %d done buf info init", channel));
3881 
3882 	/*
3883 	 * Finally, permit nxge_freeb() to call nxge_post_page().
3884 	 */
3885 	rbrp->rbr_state = RBR_POSTING;
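	/*
	 * RBR states as used in this file: RBR_POSTING while the ring
	 * is live (nxge_post_page() may repost buffers), RBR_UNMAPPING
	 * while the ring is being torn down, and RBR_UNMAPPED once
	 * teardown has finished with buffers still loaned out, in
	 * which case nxge_freeb() frees the ring structure when the
	 * last reference is returned.
	 */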
3886 
3887 	*rbr_p = rbrp;
3888 	goto nxge_map_rxdma_channel_buf_ring_exit;
3889 
3890 nxge_map_rxdma_channel_buf_ring_fail1:
3891 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3892 	    " nxge_map_rxdma_channel_buf_ring: failed channel %d (status 0x%x)",
3893 	    channel, status));
3894 
3895 	index--;
3896 	for (; index >= 0; index--) {
3897 		rx_msg_p = rx_msg_ring[index];
3898 		if (rx_msg_p != NULL) {
3899 			freeb(rx_msg_p->rx_mblk_p);
3900 			rx_msg_ring[index] = NULL;
3901 		}
3902 	}
3903 nxge_map_rxdma_channel_buf_ring_fail:
3904 	MUTEX_DESTROY(&rbrp->post_lock);
3905 	MUTEX_DESTROY(&rbrp->lock);
3906 	KMEM_FREE(ring_info, sizeof (rxring_info_t));
3907 	KMEM_FREE(rx_msg_ring, size);
3908 	KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t));
3909 
3910 	status = NXGE_ERROR;
3911 
3912 nxge_map_rxdma_channel_buf_ring_exit:
3913 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3914 	    "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status));
3915 
3916 	return (status);
3917 }
3918 
3919 /*ARGSUSED*/
3920 static void
3921 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep,
3922     p_rx_rbr_ring_t rbr_p)
3923 {
3924 	p_rx_msg_t 		*rx_msg_ring;
3925 	p_rx_msg_t 		rx_msg_p;
3926 	rxring_info_t 		*ring_info;
3927 	int			i;
3928 	uint32_t		size;
3929 #ifdef	NXGE_DEBUG
3930 	int			num_chunks;
3931 #endif
3932 
3933 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3934 	    "==> nxge_unmap_rxdma_channel_buf_ring"));
3935 	if (rbr_p == NULL) {
3936 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
3937 		    "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp"));
3938 		return;
3939 	}
3940 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3941 	    "==> nxge_unmap_rxdma_channel_buf_ring: channel %d",
3942 	    rbr_p->rdc));
3943 
3944 	rx_msg_ring = rbr_p->rx_msg_ring;
3945 	ring_info = rbr_p->ring_info;
3946 
3947 	if (rx_msg_ring == NULL || ring_info == NULL) {
3948 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3949 		    "<== nxge_unmap_rxdma_channel_buf_ring: "
3950 		    "rx_msg_ring $%p ring_info $%p",
3951 		    rx_msg_ring, ring_info));
3952 		return;
3953 	}
3954 
3955 #ifdef	NXGE_DEBUG
3956 	num_chunks = rbr_p->num_blocks;
3957 #endif
3958 	size = rbr_p->tnblocks * sizeof (p_rx_msg_t);
3959 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3960 	    " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d "
3961 	    "tnblocks %d (max %d) size ptrs %d ",
3962 	    rbr_p->rdc, num_chunks,
3963 	    rbr_p->tnblocks, rbr_p->rbr_max_size, size));
3964 
3965 	for (i = 0; i < rbr_p->tnblocks; i++) {
3966 		rx_msg_p = rx_msg_ring[i];
3967 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3968 		    " nxge_unmap_rxdma_channel_buf_ring: "
3969 		    "rx_msg_p $%p",
3970 		    rx_msg_p));
3971 		if (rx_msg_p != NULL) {
3972 			freeb(rx_msg_p->rx_mblk_p);
3973 			rx_msg_ring[i] = NULL;
3974 		}
3975 	}
3976 
3977 	/*
3978 	 * We may no longer use the <post_lock> mutex. By setting
3979 	 * <rbr_state> to anything but POSTING, we prevent
3980 	 * nxge_post_page() from accessing a dead mutex.
3981 	 */
3982 	rbr_p->rbr_state = RBR_UNMAPPING;
3983 	MUTEX_DESTROY(&rbr_p->post_lock);
3984 
3985 	MUTEX_DESTROY(&rbr_p->lock);
3986 
3987 	if (rbr_p->rbr_ref_cnt == 0) {
3988 		/*
3989 		 * This is the normal state of affairs.
3990 		 * Need to free the following buffers:
3991 		 *  - data buffers
3992 		 *  - rx_msg ring
3993 		 *  - ring_info
3994 		 *  - rbr ring
3995 		 */
3996 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
3997 		    "unmap_rxdma_buf_ring: No outstanding - freeing "));
3998 		nxge_rxdma_databuf_free(rbr_p);
3999 		KMEM_FREE(ring_info, sizeof (rxring_info_t));
4000 		KMEM_FREE(rx_msg_ring, size);
4001 		KMEM_FREE(rbr_p, sizeof (*rbr_p));
4002 	} else {
4003 		/*
4004 		 * Some of our buffers are still being used.
4005 		 * Therefore, tell nxge_freeb() this ring is
4006 		 * unmapped, so it may free <rbr_p> for us.
4007 		 */
4008 		rbr_p->rbr_state = RBR_UNMAPPED;
4009 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4010 		    "unmap_rxdma_buf_ring: %d %s outstanding.",
4011 		    rbr_p->rbr_ref_cnt,
4012 		    rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs"));
4013 	}
4014 
4015 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4016 	    "<== nxge_unmap_rxdma_channel_buf_ring"));
4017 }
4018 
4019 /*
4020  * nxge_rxdma_hw_start_common
4021  *
4022  * Arguments:
4023  * 	nxgep
4024  *
4025  * Notes:
4026  *
4027  * NPI/NXGE function calls:
4028  *	nxge_init_fzc_rx_common();
4029  *	nxge_init_fzc_rxdma_port();
4030  *
4031  * Registers accessed:
4032  *
4033  * Context:
4034  *	Service domain
4035  */
4036 static nxge_status_t
4037 nxge_rxdma_hw_start_common(p_nxge_t nxgep)
4038 {
4039 	nxge_status_t		status = NXGE_OK;
4040 
4041 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common"));
4042 
4043 	/*
4044 	 * Load the sharable parameters by writing to the
4045 	 * function zero control registers. These FZC registers
4046 	 * should be initialized only once for the entire chip.
4047 	 */
4048 	(void) nxge_init_fzc_rx_common(nxgep);
4049 
4050 	/*
4051 	 * Initialize the RXDMA port specific FZC control configurations.
4052 	 * These FZC registers are pertaining to each port.
4053 	 */
4054 	(void) nxge_init_fzc_rxdma_port(nxgep);
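	/*
	 * Both calls above program function-zero control (FZC) space,
	 * which is shared chip-wide or per-port and is programmed from
	 * the service domain only; guest domains skip FZC programming
	 * (see the isLDOMguest() checks in nxge_rxdma_start_channel()).
	 */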
4055 
4056 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_start_common"));
4057 
4058 	return (status);
4059 }
4060 
4061 static nxge_status_t
4062 nxge_rxdma_hw_start(p_nxge_t nxgep, int channel)
4063 {
4064 	int			i, ndmas;
4065 	p_rx_rbr_rings_t 	rx_rbr_rings;
4066 	p_rx_rbr_ring_t		*rbr_rings;
4067 	p_rx_rcr_rings_t 	rx_rcr_rings;
4068 	p_rx_rcr_ring_t		*rcr_rings;
4069 	p_rx_mbox_areas_t 	rx_mbox_areas_p;
4070 	p_rx_mbox_t		*rx_mbox_p;
4071 	nxge_status_t		status = NXGE_OK;
4072 
4073 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start"));
4074 
4075 	rx_rbr_rings = nxgep->rx_rbr_rings;
4076 	rx_rcr_rings = nxgep->rx_rcr_rings;
4077 	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
4078 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
4079 		    "<== nxge_rxdma_hw_start: NULL ring pointers"));
4080 		return (NXGE_ERROR);
4081 	}
4082 	ndmas = rx_rbr_rings->ndmas;
4083 	if (ndmas == 0) {
4084 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
4085 		    "<== nxge_rxdma_hw_start: no dma channel allocated"));
4086 		return (NXGE_ERROR);
4087 	}
4088 
4089 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4090 	    "==> nxge_rxdma_hw_start (ndmas %d)", ndmas));
4091 
4092 	rbr_rings = rx_rbr_rings->rbr_rings;
4093 	rcr_rings = rx_rcr_rings->rcr_rings;
4094 	rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
4095 	if (rx_mbox_areas_p) {
4096 		rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
4097 	}
4098 
4099 	i = channel;
4100 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4101 	    "==> nxge_rxdma_hw_start (ndmas %d) channel %d",
4102 	    ndmas, channel));
4103 	status = nxge_rxdma_start_channel(nxgep, channel,
4104 	    (p_rx_rbr_ring_t)rbr_rings[i],
4105 	    (p_rx_rcr_ring_t)rcr_rings[i],
4106 	    (p_rx_mbox_t)rx_mbox_p[i]);
4107 	if (status != NXGE_OK) {
4108 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4109 		    "==> nxge_rxdma_hw_start: disable "
4110 		    "(status 0x%x channel %d)", status, channel));
4111 		return (status);
4112 	}
4113 
4114 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: "
4115 	    "rx_rbr_rings 0x%016llx rings 0x%016llx",
4116 	    rx_rbr_rings, rx_rcr_rings));
4117 
4118 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4119 	    "<== nxge_rxdma_hw_start: (status 0x%x)", status));
4120 
4121 	return (status);
4122 }
4123 
4124 static void
4125 nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel)
4126 {
4127 	p_rx_rbr_rings_t 	rx_rbr_rings;
4128 	p_rx_rcr_rings_t 	rx_rcr_rings;
4129 
4130 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop"));
4131 
4132 	rx_rbr_rings = nxgep->rx_rbr_rings;
4133 	rx_rcr_rings = nxgep->rx_rcr_rings;
4134 	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
4135 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
4136 		    "<== nxge_rxdma_hw_stop: NULL ring pointers"));
4137 		return;
4138 	}
4139 
4140 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4141 	    "==> nxge_rxdma_hw_stop(channel %d)",
4142 	    channel));
4143 	(void) nxge_rxdma_stop_channel(nxgep, channel);
4144 
4145 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: "
4146 	    "rx_rbr_rings 0x%016llx rings 0x%016llx",
4147 	    rx_rbr_rings, rx_rcr_rings));
4148 
4149 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop"));
4150 }
4151 
4152 
4153 static nxge_status_t
4154 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel,
4155     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
4157 {
4158 	npi_handle_t		handle;
4159 	npi_status_t		rs = NPI_SUCCESS;
4160 	rx_dma_ctl_stat_t	cs;
4161 	rx_dma_ent_msk_t	ent_mask;
4162 	nxge_status_t		status = NXGE_OK;
4163 
4164 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel"));
4165 
4166 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
4167 
4168 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: "
4169 		"npi handle addr $%p acc $%p",
4170 		nxgep->npi_handle.regp, nxgep->npi_handle.regh));
4171 
4172 	/* Reset RXDMA channel, but not if you're a guest. */
4173 	if (!isLDOMguest(nxgep)) {
4174 		rs = npi_rxdma_cfg_rdc_reset(handle, channel);
4175 		if (rs != NPI_SUCCESS) {
4176 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4177 			    "==> nxge_rxdma_start_channel: "
4178 			    "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x",
4179 			    channel, rs));
4180 			return (NXGE_ERROR | rs);
4181 		}
4182 
4183 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4184 		    "==> nxge_rxdma_start_channel: reset done: channel %d",
4185 		    channel));
4186 	}
4187 
4188 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
4189 	if (isLDOMguest(nxgep))
4190 		(void) nxge_rdc_lp_conf(nxgep, channel);
4191 #endif
4192 
4193 	/*
4194 	 * Initialize the RXDMA channel specific FZC control
4195 	 * configurations. These FZC registers are pertaining
4196 	 * to each RX channel (logical pages).
4197 	 */
4198 	if (!isLDOMguest(nxgep)) {
4199 		status = nxge_init_fzc_rxdma_channel(nxgep, channel);
4200 		if (status != NXGE_OK) {
4201 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4202 				"==> nxge_rxdma_start_channel: "
4203 				"init fzc rxdma failed (0x%08x channel %d)",
4204 				status, channel));
4205 			return (status);
4206 		}
4207 
4208 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4209 			"==> nxge_rxdma_start_channel: fzc done"));
4210 	}
4211 
4212 	/* Set up the interrupt event masks. */
4213 	ent_mask.value = 0;
4214 	ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK;
4215 	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
4216 	    &ent_mask);
4217 	if (rs != NPI_SUCCESS) {
4218 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4219 			"==> nxge_rxdma_start_channel: "
4220 			"init rxdma event masks failed "
4221 			"(0x%08x channel %d)",
4222 			rs, channel));
4223 		return (NXGE_ERROR | rs);
4224 	}
4225 
4226 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4227 		"==> nxge_rxdma_start_channel: "
4228 		"event done: channel %d (mask 0x%016llx)",
4229 		channel, ent_mask.value));
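	/*
	 * Event-mask convention, inferred from the RX_DMA_ENT_MSK_ALL
	 * usage in the stop path: a set bit masks (disables) the
	 * corresponding event.  During bring-up only RBREMPTY is
	 * masked; after the channel is enabled the mask is rewritten
	 * below to suppress WRED-drop and port-drop events instead.
	 */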
4230 
4231 	/* Initialize the receive DMA control and status register */
4232 	cs.value = 0;
4233 	cs.bits.hdw.mex = 1;
4234 	cs.bits.hdw.rcrthres = 1;
4235 	cs.bits.hdw.rcrto = 1;
4236 	cs.bits.hdw.rbr_empty = 1;
4237 	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
4238 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4239 		"channel %d rx_dma_cntl_stat 0x%016llx", channel, cs.value));
4240 	if (status != NXGE_OK) {
4241 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4242 			"==> nxge_rxdma_start_channel: "
4243 			"init rxdma control register failed (0x%08x channel %d)",
4244 			status, channel));
4245 		return (status);
4246 	}
4247 
4248 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4249 		"control done - channel %d cs 0x%016llx", channel, cs.value));
4250 
4251 	/*
4252 	 * Load RXDMA descriptors, buffers, mailbox,
4253 	 * initialise the receive DMA channels and
4254 	 * enable each DMA channel.
4255 	 */
4256 	status = nxge_enable_rxdma_channel(nxgep,
4257 	    channel, rbr_p, rcr_p, mbox_p);
4258 
4259 	if (status != NXGE_OK) {
4260 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4261 		    " nxge_rxdma_start_channel: "
4262 		    " enable rxdma failed (0x%08x channel %d)",
4263 		    status, channel));
4264 		return (status);
4265 	}
4266 
4267 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4268 	    "==> nxge_rxdma_start_channel: enabled channel %d", channel));
4269 
4270 	if (isLDOMguest(nxgep)) {
4271 		/* Add interrupt handler for this channel. */
4272 		status = nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel);
4273 		if (status != NXGE_OK) {
4274 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4275 			    " nxge_rxdma_start_channel: "
4276 			    " nxge_hio_intr_add failed (0x%08x channel %d)",
4277 			    status, channel));
4278 			return (status);
4279 		}
4280 	}
4281 
4282 	ent_mask.value = 0;
4283 	ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK |
4284 				RX_DMA_ENT_MSK_PTDROP_PKT_MASK);
4285 	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
4286 			&ent_mask);
4287 	if (rs != NPI_SUCCESS) {
4288 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4289 			"==> nxge_rxdma_start_channel: "
4290 			"init rxdma event masks failed (0x%08x channel %d)",
4291 			rs, channel));
4292 		return (NXGE_ERROR | rs);
4293 	}
4294 
4295 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4296 		"final event mask done - channel %d cs 0x%016llx", channel, cs.value));
4297 
4298 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel"));
4299 
4300 	return (NXGE_OK);
4301 }
4302 
4303 static nxge_status_t
4304 nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
4305 {
4306 	npi_handle_t		handle;
4307 	npi_status_t		rs = NPI_SUCCESS;
4308 	rx_dma_ctl_stat_t	cs;
4309 	rx_dma_ent_msk_t	ent_mask;
4310 	nxge_status_t		status = NXGE_OK;
4311 
4312 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel"));
4313 
4314 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
4315 
4316 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: "
4317 	    "npi handle addr $%p acc $%p",
4318 	    nxgep->npi_handle.regp, nxgep->npi_handle.regh));
4319 
4320 	if (!isLDOMguest(nxgep)) {
4321 		/*
4322 		 * Stop RxMAC = A.9.2.6
4323 		 */
4324 		if (nxge_rx_mac_disable(nxgep) != NXGE_OK) {
4325 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4326 			    "nxge_rxdma_stop_channel: "
4327 			    "Failed to disable RxMAC"));
4328 		}
4329 
4330 		/*
4331 		 * Drain IPP Port = A.9.3.6
4332 		 */
4333 		(void) nxge_ipp_drain(nxgep);
4334 	}
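	/*
	 * Quiesce sequence for this channel, following the steps
	 * referenced above: stop the RxMAC, drain the IPP, reset the
	 * RDC, mask all events, clear the control/status register,
	 * disable the channel, and finally re-enable the RxMAC,
	 * presumably so the other channels on this port keep
	 * receiving.
	 */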
4335 
4336 	/* Reset RXDMA channel */
4337 	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
4338 	if (rs != NPI_SUCCESS) {
4339 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4340 		    " nxge_rxdma_stop_channel: "
4341 		    " reset rxdma failed (0x%08x channel %d)",
4342 		    rs, channel));
4343 		return (NXGE_ERROR | rs);
4344 	}
4345 
4346 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
4347 	    "==> nxge_rxdma_stop_channel: reset done"));
4348 
4349 	/* Set up the interrupt event masks. */
4350 	ent_mask.value = RX_DMA_ENT_MSK_ALL;
4351 	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
4352 	    &ent_mask);
4353 	if (rs != NPI_SUCCESS) {
4354 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4355 		    "==> nxge_rxdma_stop_channel: "
4356 		    "set rxdma event masks failed (0x%08x channel %d)",
4357 		    rs, channel));
4358 		return (NXGE_ERROR | rs);
4359 	}
4360 
4361 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
4362 	    "==> nxge_rxdma_stop_channel: event done"));
4363 
4364 	/*
4365 	 * Initialize the receive DMA control and status register
4366 	 */
4367 	cs.value = 0;
4368 	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
4369 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control "
4370 	    " to default (all 0s) 0x%016llx", cs.value));
4371 	if (status != NXGE_OK) {
4372 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4373 		    " nxge_rxdma_stop_channel: init rxdma"
4374 		    " control register failed (0x%08x channel %d)",
4375 		    status, channel));
4376 		return (status);
4377 	}
4378 
4379 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
4380 	    "==> nxge_rxdma_stop_channel: control done"));
4381 
4382 	/*
4383 	 * Make sure channel is disabled.
4384 	 */
4385 	status = nxge_disable_rxdma_channel(nxgep, channel);
4386 
4387 	if (status != NXGE_OK) {
4388 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4389 		    " nxge_rxdma_stop_channel: "
4390 		    " init enable rxdma failed (0x%08x channel %d)",
4391 		    status, channel));
4392 		return (status);
4393 	}
4394 
4395 	if (!isLDOMguest(nxgep)) {
4396 		/*
4397 		 * Enable RxMAC = A.9.2.10
4398 		 */
4399 		if (nxge_rx_mac_enable(nxgep) != NXGE_OK) {
4400 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4401 			    "nxge_rxdma_stop_channel: Rx MAC still disabled"));
4402 		}
4403 	}
4404 
4405 	NXGE_DEBUG_MSG((nxgep,
4406 	    RX_CTL, "==> nxge_rxdma_stop_channel: disable done"));
4407 
4408 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel"));
4409 
4410 	return (NXGE_OK);
4411 }
4412 
4413 nxge_status_t
4414 nxge_rxdma_handle_sys_errors(p_nxge_t nxgep)
4415 {
4416 	npi_handle_t		handle;
4417 	p_nxge_rdc_sys_stats_t	statsp;
4418 	rx_ctl_dat_fifo_stat_t	stat;
4419 	uint32_t		zcp_err_status;
4420 	uint32_t		ipp_err_status;
4421 	nxge_status_t		status = NXGE_OK;
4422 	npi_status_t		rs = NPI_SUCCESS;
4423 	boolean_t		my_err = B_FALSE;
4424 
4425 	handle = nxgep->npi_handle;
4426 	statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;
4427 
4428 	rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat);
4429 
4430 	if (rs != NPI_SUCCESS)
4431 		return (NXGE_ERROR | rs);
4432 
4433 	if (stat.bits.ldw.id_mismatch) {
4434 		statsp->id_mismatch++;
4435 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
4436 		    NXGE_FM_EREPORT_RDMC_ID_MISMATCH);
4437 		/* Global fatal error encountered */
4438 	}
4439 
4440 	if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) {
4441 		switch (nxgep->mac.portnum) {
4442 		case 0:
4443 			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) ||
4444 			    (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) {
4445 				my_err = B_TRUE;
4446 				zcp_err_status = stat.bits.ldw.zcp_eop_err;
4447 				ipp_err_status = stat.bits.ldw.ipp_eop_err;
4448 			}
4449 			break;
4450 		case 1:
4451 			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) ||
4452 			    (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) {
4453 				my_err = B_TRUE;
4454 				zcp_err_status = stat.bits.ldw.zcp_eop_err;
4455 				ipp_err_status = stat.bits.ldw.ipp_eop_err;
4456 			}
4457 			break;
4458 		case 2:
4459 			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) ||
4460 			    (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) {
4461 				my_err = B_TRUE;
4462 				zcp_err_status = stat.bits.ldw.zcp_eop_err;
4463 				ipp_err_status = stat.bits.ldw.ipp_eop_err;
4464 			}
4465 			break;
4466 		case 3:
4467 			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) ||
4468 			    (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) {
4469 				my_err = B_TRUE;
4470 				zcp_err_status = stat.bits.ldw.zcp_eop_err;
4471 				ipp_err_status = stat.bits.ldw.ipp_eop_err;
4472 			}
4473 			break;
4474 		default:
4475 			return (NXGE_ERROR);
4476 		}
4477 	}
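	/*
	 * The zcp_eop_err and ipp_eop_err fields are per-port bitmasks
	 * (FIFO_EOP_PORT0..3), so the switch above simply selects the
	 * bit belonging to this port before deciding whether the error
	 * is ours to handle.
	 */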
4478 
4479 	if (my_err) {
4480 		status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status,
4481 		    zcp_err_status);
4482 		if (status != NXGE_OK)
4483 			return (status);
4484 	}
4485 
4486 	return (NXGE_OK);
4487 }
4488 
4489 static nxge_status_t
4490 nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status,
4491 							uint32_t zcp_status)
4492 {
4493 	boolean_t		rxport_fatal = B_FALSE;
4494 	p_nxge_rdc_sys_stats_t	statsp;
4495 	nxge_status_t		status = NXGE_OK;
4496 	uint8_t			portn;
4497 
4498 	portn = nxgep->mac.portnum;
4499 	statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;
4500 
4501 	if (ipp_status & (0x1 << portn)) {
4502 		statsp->ipp_eop_err++;
4503 		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
4504 		    NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR);
4505 		rxport_fatal = B_TRUE;
4506 	}
4507 
4508 	if (zcp_status & (0x1 << portn)) {
4509 		statsp->zcp_eop_err++;
4510 		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
4511 		    NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR);
4512 		rxport_fatal = B_TRUE;
4513 	}
4514 
4515 	if (rxport_fatal) {
4516 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4517 		    " nxge_rxdma_handle_port_error: "
4518 		    " fatal error on Port #%d\n",
4519 		    portn));
4520 		status = nxge_rx_port_fatal_err_recover(nxgep);
4521 		if (status == NXGE_OK) {
4522 			FM_SERVICE_RESTORED(nxgep);
4523 		}
4524 	}
4525 
4526 	return (status);
4527 }
4528 
4529 static nxge_status_t
4530 nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel)
4531 {
4532 	npi_handle_t		handle;
4533 	npi_status_t		rs = NPI_SUCCESS;
4534 	nxge_status_t		status = NXGE_OK;
4535 	p_rx_rbr_ring_t		rbrp;
4536 	p_rx_rcr_ring_t		rcrp;
4537 	p_rx_mbox_t		mboxp;
4538 	rx_dma_ent_msk_t	ent_mask;
4539 	p_nxge_dma_common_t	dmap;
4540 	uint32_t		ref_cnt;
4541 	p_rx_msg_t		rx_msg_p;
4542 	int			i;
4543 	uint32_t		nxge_port_rcr_size;
4544 
4545 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover"));
4546 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4547 	    "Recovering from RxDMAChannel#%d error...", channel));
4548 
4549 	/*
4550 	 * Stop the DMA channel and wait for the stop-done bit.
4551 	 * If the stop-done bit is not set, then flag
4552 	 * an error.
4553 	 */
4554 
4555 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
4556 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop..."));
4557 
4558 	rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[channel];
4559 	rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[channel];
4560 
4561 	MUTEX_ENTER(&rbrp->lock);
4562 	MUTEX_ENTER(&rbrp->post_lock);
4563 
4564 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel..."));
4565 
4566 	rs = npi_rxdma_cfg_rdc_disable(handle, channel);
4567 	if (rs != NPI_SUCCESS) {
4568 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4569 		    "nxge_disable_rxdma_channel:failed"));
4570 		goto fail;
4571 	}
4572 
4573 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt..."));
4574 
4575 	/* Disable interrupt */
4576 	ent_mask.value = RX_DMA_ENT_MSK_ALL;
4577 	rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
4578 	if (rs != NPI_SUCCESS) {
4579 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4580 		    "nxge_rxdma_stop_channel: "
4581 		    "set rxdma event masks failed (channel %d)",
4582 		    channel));
4583 	}
4584 
4585 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset..."));
4586 
4587 	/* Reset RXDMA channel */
4588 	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
4589 	if (rs != NPI_SUCCESS) {
4590 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4591 		    "nxge_rxdma_fatal_err_recover: "
4592 		    " reset rxdma failed (channel %d)", channel));
4593 		goto fail;
4594 	}
4595 
4596 	nxge_port_rcr_size = nxgep->nxge_port_rcr_size;
4597 
4598 	mboxp = (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[channel];
4599 
4600 	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
4601 	rbrp->rbr_rd_index = 0;
4602 
4603 	rcrp->comp_rd_index = 0;
4604 	rcrp->comp_wt_index = 0;
4605 	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
4606 	    (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
4607 #if defined(__i386)
4608 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
4609 	    (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
4610 #else
4611 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
4612 	    (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
4613 #endif
4614 
4615 	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
4616 	    (nxge_port_rcr_size - 1);
4617 	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
4618 	    (nxge_port_rcr_size - 1);
4619 
4620 	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
4621 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
4622 
4623 	cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size);
4624 
4625 	for (i = 0; i < rbrp->rbr_max_size; i++) {
4626 		rx_msg_p = rbrp->rx_msg_ring[i];
4627 		ref_cnt = rx_msg_p->ref_cnt;
4628 		if (ref_cnt != 1) {
4629 			if (rx_msg_p->cur_usage_cnt !=
4630 			    rx_msg_p->max_usage_cnt) {
4631 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4632 				    "buf[%d]: cur_usage_cnt = %d "
4633 				    "max_usage_cnt = %d\n", i,
4634 				    rx_msg_p->cur_usage_cnt,
4635 				    rx_msg_p->max_usage_cnt));
4636 			} else {
4637 				/* Buffer can be re-posted */
4638 				rx_msg_p->free = B_TRUE;
4639 				rx_msg_p->cur_usage_cnt = 0;
4640 				rx_msg_p->max_usage_cnt = 0xbaddcafe;
4641 				rx_msg_p->pkt_buf_size = 0;
4642 			}
4643 		}
4644 	}
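	/*
	 * Buffers with ref_cnt == 1 are owned solely by the ring and
	 * need no attention.  Buffers still loaned upstream are left
	 * alone unless they have been fully consumed (cur_usage_cnt ==
	 * max_usage_cnt), in which case they are marked free so they
	 * can be re-posted when they are eventually returned.
	 */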
4645 
4646 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start..."));
4647 
4648 	status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp);
4649 	if (status != NXGE_OK) {
4650 		goto fail;
4651 	}
4652 
4653 	MUTEX_EXIT(&rbrp->post_lock);
4654 	MUTEX_EXIT(&rbrp->lock);
4655 
4656 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4657 	    "Recovery Successful, RxDMAChannel#%d Restored",
4658 	    channel));
4659 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover"));
4660 	return (NXGE_OK);
4661 
4662 fail:
4663 	MUTEX_EXIT(&rbrp->post_lock);
4664 	MUTEX_EXIT(&rbrp->lock);
4665 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
4666 	return (NXGE_ERROR | rs);
4667 }
4668 
4669 nxge_status_t
4670 nxge_rx_port_fatal_err_recover(p_nxge_t nxgep)
4671 {
4672 	nxge_grp_set_t *set = &nxgep->rx_set;
4673 	nxge_status_t status = NXGE_OK;
4674 	p_rx_rcr_ring_t rcrp;
4675 	int rdc;
4676 
4677 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_port_fatal_err_recover"));
4678 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4679 	    "Recovering from RxPort error..."));
4680 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n"));
4681 
4682 	if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
4683 		goto fail;
4684 
4685 	NXGE_DELAY(1000);
4686 
4687 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stopping all RxDMA channels..."));
4688 
4689 	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
4690 		if ((1 << rdc) & set->owned.map) {
4691 			rcrp = nxgep->rx_rcr_rings->rcr_rings[rdc];
4692 			if (rcrp != NULL) {
4693 				MUTEX_ENTER(&rcrp->lock);
4694 				if (nxge_rxdma_fatal_err_recover(nxgep,
4695 				    rdc) != NXGE_OK) {
4696 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4697 					    "Could not recover "
4698 					    "channel %d", rdc));
4699 				}
4700 				MUTEX_EXIT(&rcrp->lock);
4701 			}
4702 		}
4703 	}
4704 
4705 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP..."));
4706 
4707 	/* Reset IPP */
4708 	if (nxge_ipp_reset(nxgep) != NXGE_OK) {
4709 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4710 		    "nxge_rx_port_fatal_err_recover: "
4711 		    "Failed to reset IPP"));
4712 		goto fail;
4713 	}
4714 
4715 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC..."));
4716 
4717 	/* Reset RxMAC */
4718 	if (nxge_rx_mac_reset(nxgep) != NXGE_OK) {
4719 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4720 		    "nxge_rx_port_fatal_err_recover: "
4721 		    "Failed to reset RxMAC"));
4722 		goto fail;
4723 	}
4724 
4725 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP..."));
4726 
4727 	/* Re-Initialize IPP */
4728 	if (nxge_ipp_init(nxgep) != NXGE_OK) {
4729 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4730 		    "nxge_rx_port_fatal_err_recover: "
4731 		    "Failed to init IPP"));
4732 		goto fail;
4733 	}
4734 
4735 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC..."));
4736 
4737 	/* Re-Initialize RxMAC */
4738 	if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) {
4739 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4740 		    "nxge_rx_port_fatal_err_recover: "
4741 		    "Failed to initialize RxMAC"));
4742 		goto fail;
4743 	}
4744 
4745 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC..."));
4746 
4747 	/* Re-enable RxMAC */
4748 	if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) {
4749 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4750 		    "nxge_rx_port_fatal_err_recover: "
4751 		    "Failed to enable RxMAC"));
4752 		goto fail;
4753 	}
4754 
4755 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4756 	    "Recovery Successful, RxPort Restored"));
4757 
4758 	return (NXGE_OK);
4759 fail:
4760 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
4761 	return (status);
4762 }
4763 
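/*
 * nxge_rxdma_inject_err
 *
 * Inject an RDMC error for fault-management testing: set the
 * requested error bit and write it to RX_DMA_CTL_STAT_DBG_REG for
 * channel-scoped errors, or to RX_CTL_DAT_FIFO_STAT_DBG_REG for
 * port-scoped errors (ID mismatch, ZCP/IPP EOP errors).
 */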
4764 void
4765 nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
4766 {
4767 	rx_dma_ctl_stat_t	cs;
4768 	rx_ctl_dat_fifo_stat_t	cdfs;
4769 
4770 	switch (err_id) {
4771 	case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR:
4772 	case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR:
4773 	case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR:
4774 	case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR:
4775 	case NXGE_FM_EREPORT_RDMC_RBR_TMOUT:
4776 	case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR:
4777 	case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS:
4778 	case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR:
4779 	case NXGE_FM_EREPORT_RDMC_RCRINCON:
4780 	case NXGE_FM_EREPORT_RDMC_RCRFULL:
4781 	case NXGE_FM_EREPORT_RDMC_RBRFULL:
4782 	case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE:
4783 	case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE:
4784 	case NXGE_FM_EREPORT_RDMC_CONFIG_ERR:
4785 		RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
4786 		    chan, &cs.value);
4787 		if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR)
4788 			cs.bits.hdw.rcr_ack_err = 1;
4789 		else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR)
4790 			cs.bits.hdw.dc_fifo_err = 1;
4791 		else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR)
4792 			cs.bits.hdw.rcr_sha_par = 1;
4793 		else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR)
4794 			cs.bits.hdw.rbr_pre_par = 1;
4795 		else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT)
4796 			cs.bits.hdw.rbr_tmout = 1;
4797 		else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR)
4798 			cs.bits.hdw.rsp_cnt_err = 1;
4799 		else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS)
4800 			cs.bits.hdw.byte_en_bus = 1;
4801 		else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR)
4802 			cs.bits.hdw.rsp_dat_err = 1;
4803 		else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR)
4804 			cs.bits.hdw.config_err = 1;
4805 		else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON)
4806 			cs.bits.hdw.rcrincon = 1;
4807 		else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL)
4808 			cs.bits.hdw.rcrfull = 1;
4809 		else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL)
4810 			cs.bits.hdw.rbrfull = 1;
4811 		else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE)
4812 			cs.bits.hdw.rbrlogpage = 1;
4813 		else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE)
4814 			cs.bits.hdw.cfiglogpage = 1;
4815 #if defined(__i386)
4816 		cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n",
4817 		    cs.value);
4818 #else
4819 		cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n",
4820 		    cs.value);
4821 #endif
4822 		RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
4823 		    chan, cs.value);
4824 		break;
4825 	case NXGE_FM_EREPORT_RDMC_ID_MISMATCH:
4826 	case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR:
4827 	case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR:
4828 		cdfs.value = 0;
4829 		if (err_id ==  NXGE_FM_EREPORT_RDMC_ID_MISMATCH)
4830 			cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum);
4831 		else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR)
4832 			cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum);
4833 		else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR)
4834 			cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum);
4835 #if defined(__i386)
4836 		cmn_err(CE_NOTE,
4837 		    "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
4838 		    cdfs.value);
4839 #else
4840 		cmn_err(CE_NOTE,
4841 		    "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
4842 		    cdfs.value);
4843 #endif
4844 		NXGE_REG_WR64(nxgep->npi_handle,
4845 		    RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value);
4846 		break;
4847 	case NXGE_FM_EREPORT_RDMC_DCF_ERR:
4848 		break;
4849 	case NXGE_FM_EREPORT_RDMC_RCR_ERR:
4850 		break;
4851 	}
4852 }
4853 
4854 static void
4855 nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p)
4856 {
4857 	rxring_info_t 		*ring_info;
4858 	int			index;
4859 	uint32_t		chunk_size;
4860 	uint64_t		kaddr;
4861 	uint_t			num_blocks;
4862 
4863 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free"));
4864 
4865 	if (rbr_p == NULL) {
4866 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4867 		    "==> nxge_rxdma_databuf_free: NULL rbr pointer"));
4868 		return;
4869 	}
4870 
4871 	if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) {
4872 		NXGE_DEBUG_MSG((NULL, DMA_CTL,
4873 		    "<== nxge_rxdma_databuf_free: DDI"));
4874 		return;
4875 	}
4876 
4877 	ring_info = rbr_p->ring_info;
4878 	if (ring_info == NULL) {
4879 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4880 		    "==> nxge_rxdma_databuf_free: NULL ring info"));
4881 		return;
4882 	}
4883 	num_blocks = rbr_p->num_blocks;
4884 	for (index = 0; index < num_blocks; index++) {
4885 		kaddr = ring_info->buffer[index].kaddr;
4886 		chunk_size = ring_info->buffer[index].buf_size;
4887 		NXGE_DEBUG_MSG((NULL, DMA_CTL,
4888 		    "==> nxge_rxdma_databuf_free: free chunk %d "
4889 		    "kaddrp $%p chunk size %d",
4890 		    index, kaddr, chunk_size));
4891 		if (kaddr == 0) continue;
4892 		nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size);
4893 		ring_info->buffer[index].kaddr = 0;
4894 	}
4895 
4896 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free"));
4897 }
4898 
4899 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
4900 extern void contig_mem_free(void *, size_t);
4901 #endif
4902 
4903 void
4904 nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size)
4905 {
4906 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf"));
4907 
4908 	if (kaddr == 0 || buf_size == 0) {
4909 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4910 		    "==> nxge_free_buf: invalid kaddr $%p size to free %d",
4911 		    kaddr, buf_size));
4912 		return;
4913 	}
4914 
4915 	switch (alloc_type) {
4916 	case KMEM_ALLOC:
4917 		NXGE_DEBUG_MSG((NULL, DMA_CTL,
4918 		    "==> nxge_free_buf: freeing kmem $%p size %d",
4919 		    kaddr, buf_size));
4920 #if defined(__i386)
4921 		KMEM_FREE((void *)(uint32_t)kaddr, buf_size);
4922 #else
4923 		KMEM_FREE((void *)kaddr, buf_size);
4924 #endif
4925 		break;
4926 
4927 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
4928 	case CONTIG_MEM_ALLOC:
4929 		NXGE_DEBUG_MSG((NULL, DMA_CTL,
4930 		    "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d",
4931 		    kaddr, buf_size));
4932 		contig_mem_free((void *)kaddr, buf_size);
4933 		break;
4934 #endif
4935 
4936 	default:
4937 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4938 		    "<== nxge_free_buf: unsupported alloc type %d",
4939 		    alloc_type));
4940 		return;
4941 	}
4942 
4943 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf"));
4944 }
4945