xref: /titanic_44/usr/src/uts/common/io/hxge/hxge_rxdma.c (revision 4a634bb80136cc001d14ab96addd9915105e5223)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <hxge_impl.h>
29 #include <hxge_rxdma.h>
30 
31 /*
32  * Globals: tunable parameters (/etc/system or adb)
33  *
34  */
35 extern uint32_t hxge_rbr_size;
36 extern uint32_t hxge_rcr_size;
37 extern uint32_t hxge_rbr_spare_size;
38 
39 extern uint32_t hxge_mblks_pending;
40 
41 /*
42  * Tunable to reduce the amount of time spent in the
43  * ISR doing Rx Processing.
44  */
45 extern uint32_t hxge_max_rx_pkts;
46 boolean_t hxge_jumbo_enable;
47 
48 /*
49  * Tunables to manage the receive buffer blocks.
50  *
51  * hxge_rx_threshold_hi: copy all buffers.
52  * hxge_rx_buf_size_type: receive buffer block size type.
53  * hxge_rx_threshold_lo: copy only up to tunable block size type.
54  */
55 extern hxge_rxbuf_threshold_t hxge_rx_threshold_hi;
56 extern hxge_rxbuf_type_t hxge_rx_buf_size_type;
57 extern hxge_rxbuf_threshold_t hxge_rx_threshold_lo;
58 
59 static hxge_status_t hxge_map_rxdma(p_hxge_t hxgep);
60 static void hxge_unmap_rxdma(p_hxge_t hxgep);
61 static hxge_status_t hxge_rxdma_hw_start_common(p_hxge_t hxgep);
62 static hxge_status_t hxge_rxdma_hw_start(p_hxge_t hxgep);
63 static void hxge_rxdma_hw_stop(p_hxge_t hxgep);
64 static hxge_status_t hxge_map_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
65 	p_hxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p,
66 	uint32_t num_chunks, p_hxge_dma_common_t *dma_cntl_p,
67 	p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p);
68 static void hxge_unmap_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
69 	p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p);
70 static hxge_status_t hxge_map_rxdma_channel_cfg_ring(p_hxge_t hxgep,
71 	uint16_t dma_channel, p_hxge_dma_common_t *dma_cntl_p,
72 	p_rx_rbr_ring_t *rbr_p, p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p);
73 static void hxge_unmap_rxdma_channel_cfg_ring(p_hxge_t hxgep,
74 	p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p);
75 static hxge_status_t hxge_map_rxdma_channel_buf_ring(p_hxge_t hxgep,
76 	uint16_t channel, p_hxge_dma_common_t *dma_buf_p,
77 	p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks);
78 static void hxge_unmap_rxdma_channel_buf_ring(p_hxge_t hxgep,
79 	p_rx_rbr_ring_t rbr_p);
80 static hxge_status_t hxge_rxdma_start_channel(p_hxge_t hxgep, uint16_t channel,
81 	p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p);
82 static hxge_status_t hxge_rxdma_stop_channel(p_hxge_t hxgep, uint16_t channel);
83 static mblk_t *hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp,
84 	p_rx_rcr_ring_t	*rcr_p, rdc_stat_t cs);
85 static void hxge_receive_packet(p_hxge_t hxgep, p_rx_rcr_ring_t rcr_p,
86 	p_rcr_entry_t rcr_desc_rd_head_p, boolean_t *multi_p,
87 	mblk_t ** mp, mblk_t ** mp_cont);
88 static hxge_status_t hxge_disable_rxdma_channel(p_hxge_t hxgep,
89 	uint16_t channel);
90 static p_rx_msg_t hxge_allocb(size_t, uint32_t, p_hxge_dma_common_t);
91 static void hxge_freeb(p_rx_msg_t);
92 static void hxge_rx_pkts_vring(p_hxge_t hxgep, uint_t vindex,
93     p_hxge_ldv_t ldvp, rdc_stat_t cs);
94 static hxge_status_t hxge_rx_err_evnts(p_hxge_t hxgep, uint_t index,
95 	p_hxge_ldv_t ldvp, rdc_stat_t cs);
96 static hxge_status_t hxge_rxbuf_index_info_init(p_hxge_t hxgep,
97 	p_rx_rbr_ring_t rx_dmap);
98 static hxge_status_t hxge_rxdma_fatal_err_recover(p_hxge_t hxgep,
99 	uint16_t channel);
100 static hxge_status_t hxge_rx_port_fatal_err_recover(p_hxge_t hxgep);
101 
102 hxge_status_t
103 hxge_init_rxdma_channels(p_hxge_t hxgep)
104 {
105 	hxge_status_t status = HXGE_OK;
106 
107 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_init_rxdma_channels"));
108 
109 	status = hxge_map_rxdma(hxgep);
110 	if (status != HXGE_OK) {
111 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
112 		    "<== hxge_init_rxdma: status 0x%x", status));
113 		return (status);
114 	}
115 
116 	status = hxge_rxdma_hw_start_common(hxgep);
117 	if (status != HXGE_OK) {
118 		hxge_unmap_rxdma(hxgep);
		return (status);
119 	}
120 
121 	status = hxge_rxdma_hw_start(hxgep);
122 	if (status != HXGE_OK) {
123 		hxge_unmap_rxdma(hxgep);
124 	}
125 
126 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
127 	    "<== hxge_init_rxdma_channels: status 0x%x", status));
128 	return (status);
129 }
130 
131 void
132 hxge_uninit_rxdma_channels(p_hxge_t hxgep)
133 {
134 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_uninit_rxdma_channels"));
135 
136 	hxge_rxdma_hw_stop(hxgep);
137 	hxge_unmap_rxdma(hxgep);
138 
139 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_uninit_rxdma_channels"));
140 }
141 
142 hxge_status_t
143 hxge_init_rxdma_channel_cntl_stat(p_hxge_t hxgep, uint16_t channel,
144     rdc_stat_t *cs_p)
145 {
146 	hpi_handle_t	handle;
147 	hpi_status_t	rs = HPI_SUCCESS;
148 	hxge_status_t	status = HXGE_OK;
149 
150 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
151 	    "==> hxge_init_rxdma_channel_cntl_stat"));
152 
153 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
154 	rs = hpi_rxdma_control_status(handle, OP_SET, channel, cs_p);
155 
156 	if (rs != HPI_SUCCESS) {
157 		status = HXGE_ERROR | rs;
158 	}
159 	return (status);
160 }
161 
162 
163 hxge_status_t
164 hxge_enable_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
165     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
166 {
167 	hpi_handle_t		handle;
168 	rdc_desc_cfg_t 		rdc_desc;
169 	rdc_rcr_cfg_b_t		*cfgb_p;
170 	hpi_status_t		rs = HPI_SUCCESS;
171 
172 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel"));
173 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
174 
175 	/*
176 	 * Use the configuration data composed at init time and write the
177 	 * receive ring configuration to the hardware.
178 	 */
179 	rdc_desc.mbox_enable = 1;
180 	rdc_desc.mbox_addr = mbox_p->mbox_addr;
181 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
182 	    "==> hxge_enable_rxdma_channel: mboxp $%p($%p)",
183 	    mbox_p->mbox_addr, rdc_desc.mbox_addr));
184 
185 	rdc_desc.rbr_len = rbr_p->rbb_max;
186 	rdc_desc.rbr_addr = rbr_p->rbr_addr;
187 
188 	switch (hxgep->rx_bksize_code) {
189 	case RBR_BKSIZE_4K:
190 		rdc_desc.page_size = SIZE_4KB;
191 		break;
192 	case RBR_BKSIZE_8K:
193 		rdc_desc.page_size = SIZE_8KB;
194 		break;
195 	}
196 
197 	rdc_desc.size0 = rbr_p->hpi_pkt_buf_size0;
198 	rdc_desc.valid0 = 1;
199 
200 	rdc_desc.size1 = rbr_p->hpi_pkt_buf_size1;
201 	rdc_desc.valid1 = 1;
202 
203 	rdc_desc.size2 = rbr_p->hpi_pkt_buf_size2;
204 	rdc_desc.valid2 = 1;
205 
206 	rdc_desc.full_hdr = rcr_p->full_hdr_flag;
207 	rdc_desc.offset = rcr_p->sw_priv_hdr_len;
208 
209 	rdc_desc.rcr_len = rcr_p->comp_size;
210 	rdc_desc.rcr_addr = rcr_p->rcr_addr;
211 
212 	cfgb_p = &(rcr_p->rcr_cfgb);
213 	rdc_desc.rcr_threshold = cfgb_p->bits.pthres;
214 	rdc_desc.rcr_timeout = cfgb_p->bits.timeout;
215 	rdc_desc.rcr_timeout_enable = cfgb_p->bits.entout;
216 
217 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel: "
218 	    "rbr_len qlen %d pagesize code %d rcr_len %d",
219 	    rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len));
220 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel: "
221 	    "size 0 %d size 1 %d size 2 %d",
222 	    rbr_p->hpi_pkt_buf_size0, rbr_p->hpi_pkt_buf_size1,
223 	    rbr_p->hpi_pkt_buf_size2));
224 
225 	rs = hpi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc);
226 	if (rs != HPI_SUCCESS) {
227 		return (HXGE_ERROR | rs);
228 	}
229 
230 	/*
231 	 * Enable the timeout and threshold.
232 	 */
233 	rs = hpi_rxdma_cfg_rdc_rcr_threshold(handle, channel,
234 	    rdc_desc.rcr_threshold);
235 	if (rs != HPI_SUCCESS) {
236 		return (HXGE_ERROR | rs);
237 	}
238 
239 	rs = hpi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
240 	    rdc_desc.rcr_timeout);
241 	if (rs != HPI_SUCCESS) {
242 		return (HXGE_ERROR | rs);
243 	}
244 
245 	/* Enable the DMA */
246 	rs = hpi_rxdma_cfg_rdc_enable(handle, channel);
247 	if (rs != HPI_SUCCESS) {
248 		return (HXGE_ERROR | rs);
249 	}
250 
251 	/* Kick the DMA engine. */
252 	hpi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max);
253 	/* Clear the rbr empty bit */
254 	(void) hpi_rxdma_channel_rbr_empty_clear(handle, channel);
255 
256 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_enable_rxdma_channel"));
257 
258 	return (HXGE_OK);
259 }
260 
261 static hxge_status_t
262 hxge_disable_rxdma_channel(p_hxge_t hxgep, uint16_t channel)
263 {
264 	hpi_handle_t handle;
265 	hpi_status_t rs = HPI_SUCCESS;
266 
267 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_disable_rxdma_channel"));
268 
269 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
270 
271 	/* disable the DMA */
272 	rs = hpi_rxdma_cfg_rdc_disable(handle, channel);
273 	if (rs != HPI_SUCCESS) {
274 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
275 		    "<== hxge_disable_rxdma_channel:failed (0x%x)", rs));
276 		return (HXGE_ERROR | rs);
277 	}
278 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_disable_rxdma_channel"));
279 	return (HXGE_OK);
280 }
281 
282 hxge_status_t
283 hxge_rxdma_channel_rcrflush(p_hxge_t hxgep, uint8_t channel)
284 {
285 	hpi_handle_t	handle;
286 	hxge_status_t	status = HXGE_OK;
287 
288 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
289 	    "==> hxge_rxdma_channel_rcrflush"));
290 
291 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
292 	hpi_rxdma_rdc_rcr_flush(handle, channel);
293 
294 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
295 	    "<== hxge_rxdma_channel_rcrflush"));
296 	return (status);
297 
298 }
299 
300 #define	MID_INDEX(l, r) (((r) + (l) + 1) >> 1)
301 
302 #define	TO_LEFT -1
303 #define	TO_RIGHT 1
304 #define	BOTH_RIGHT (TO_RIGHT + TO_RIGHT)
305 #define	BOTH_LEFT (TO_LEFT + TO_LEFT)
306 #define	IN_MIDDLE (TO_RIGHT + TO_LEFT)
307 #define	NO_HINT 0xffffffff
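/*
 * hxge_rxbuf_pp_to_vp() classifies the target address against a chunk by
 * summing a base_side and an end_side direction: IN_MIDDLE means the
 * address falls inside the chunk, BOTH_RIGHT means it lies above the chunk
 * (search right), and BOTH_LEFT means it lies below the chunk (search left).
 */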
308 
309 /*ARGSUSED*/
310 hxge_status_t
311 hxge_rxbuf_pp_to_vp(p_hxge_t hxgep, p_rx_rbr_ring_t rbr_p,
312     uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp,
313     uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index)
314 {
315 	int			bufsize;
316 	uint64_t		pktbuf_pp;
317 	uint64_t		dvma_addr;
318 	rxring_info_t		*ring_info;
319 	int			base_side, end_side;
320 	int			r_index, l_index, anchor_index;
321 	int			found, search_done;
322 	uint32_t		offset, chunk_size, block_size, page_size_mask;
323 	uint32_t		chunk_index, block_index, total_index;
324 	int			max_iterations, iteration;
325 	rxbuf_index_info_t	*bufinfo;
326 
327 	HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> hxge_rxbuf_pp_to_vp"));
328 
329 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
330 	    "==> hxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
331 	    pkt_buf_addr_pp, pktbufsz_type));
332 
333 	pktbuf_pp = (uint64_t)pkt_buf_addr_pp;
334 
335 	switch (pktbufsz_type) {
336 	case 0:
337 		bufsize = rbr_p->pkt_buf_size0;
338 		break;
339 	case 1:
340 		bufsize = rbr_p->pkt_buf_size1;
341 		break;
342 	case 2:
343 		bufsize = rbr_p->pkt_buf_size2;
344 		break;
345 	case RCR_SINGLE_BLOCK:
346 		bufsize = 0;
347 		anchor_index = 0;
348 		break;
349 	default:
350 		return (HXGE_ERROR);
351 	}
352 
353 	if (rbr_p->num_blocks == 1) {
354 		anchor_index = 0;
355 		ring_info = rbr_p->ring_info;
356 		bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
357 
358 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
359 		    "==> hxge_rxbuf_pp_to_vp: (found, 1 block) "
360 		    "buf_pp $%p btype %d anchor_index %d bufinfo $%p",
361 		    pkt_buf_addr_pp, pktbufsz_type, anchor_index, bufinfo));
362 
363 		goto found_index;
364 	}
365 
366 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
367 	    "==> hxge_rxbuf_pp_to_vp: buf_pp $%p btype %d anchor_index %d",
368 	    pkt_buf_addr_pp, pktbufsz_type, anchor_index));
369 
370 	ring_info = rbr_p->ring_info;
371 	found = B_FALSE;
372 	bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
373 	iteration = 0;
374 	max_iterations = ring_info->max_iterations;
375 
376 	/*
377 	 * First check whether this block has been seen recently. This is
378 	 * indicated by a hint which is initialized when the first buffer of
379 	 * the block is seen and reset when the last buffer of the block has
380 	 * been processed. As three block sizes are supported, three hints
381 	 * are kept. The idea behind the hints is that once the hardware uses
382 	 * a block for a buffer of a given size, it will use it exclusively
383 	 * for that size until the block is exhausted. It is assumed that
384 	 * only a single block is in use for a given buffer size at any
385 	 * given time.
386 	 */
387 	if (ring_info->hint[pktbufsz_type] != NO_HINT) {
388 		anchor_index = ring_info->hint[pktbufsz_type];
389 		dvma_addr = bufinfo[anchor_index].dvma_addr;
390 		chunk_size = bufinfo[anchor_index].buf_size;
391 		if ((pktbuf_pp >= dvma_addr) &&
392 		    (pktbuf_pp < (dvma_addr + chunk_size))) {
393 			found = B_TRUE;
394 			/*
395 			 * Check if this is the last buffer in the block. If
396 			 * so, reset the hint for this size.
397 			 */
398 
399 			if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
400 				ring_info->hint[pktbufsz_type] = NO_HINT;
401 		}
402 	}
403 
404 	if (found == B_FALSE) {
405 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
406 		    "==> hxge_rxbuf_pp_to_vp: (!found)"
407 		    "buf_pp $%p btype %d anchor_index %d",
408 		    pkt_buf_addr_pp, pktbufsz_type, anchor_index));
409 
410 		/*
411 		 * This is the first buffer of a block of this size, so the
412 		 * whole information array must be searched. The search is a
413 		 * binary search; it assumes that the information is already
414 		 * sorted in increasing order,
415 		 * info[0] < info[1] < info[2] < ... < info[n-1],
416 		 * where n is the size of the information array.
417 		 */
418 		r_index = rbr_p->num_blocks - 1;
419 		l_index = 0;
420 		search_done = B_FALSE;
421 		anchor_index = MID_INDEX(r_index, l_index);
422 		while (search_done == B_FALSE) {
423 			if ((r_index == l_index) ||
424 			    (iteration >= max_iterations))
425 				search_done = B_TRUE;
426 
427 			end_side = TO_RIGHT;	/* to the right */
428 			base_side = TO_LEFT;	/* to the left */
429 			/* read this chunk's DVMA address and size */
430 			dvma_addr = bufinfo[anchor_index].dvma_addr;
431 			chunk_size = bufinfo[anchor_index].buf_size;
432 
433 			HXGE_DEBUG_MSG((hxgep, RX2_CTL,
434 			    "==> hxge_rxbuf_pp_to_vp: (searching)"
435 			    "buf_pp $%p btype %d "
436 			    "anchor_index %d chunk_size %d dvmaaddr $%p",
437 			    pkt_buf_addr_pp, pktbufsz_type, anchor_index,
438 			    chunk_size, dvma_addr));
439 
440 			if (pktbuf_pp >= dvma_addr)
441 				base_side = TO_RIGHT;	/* to the right */
442 			if (pktbuf_pp < (dvma_addr + chunk_size))
443 				end_side = TO_LEFT;	/* to the left */
444 
445 			switch (base_side + end_side) {
446 			case IN_MIDDLE:
447 				/* found */
448 				found = B_TRUE;
449 				search_done = B_TRUE;
450 				if ((pktbuf_pp + bufsize) <
451 				    (dvma_addr + chunk_size))
452 					ring_info->hint[pktbufsz_type] =
453 					    bufinfo[anchor_index].buf_index;
454 				break;
455 			case BOTH_RIGHT:
456 				/* not found: go to the right */
457 				l_index = anchor_index + 1;
458 				anchor_index = MID_INDEX(r_index, l_index);
459 				break;
460 
461 			case BOTH_LEFT:
462 				/* not found: go to the left */
463 				r_index = anchor_index - 1;
464 				anchor_index = MID_INDEX(r_index, l_index);
465 				break;
466 			default:	/* should not come here */
467 				return (HXGE_ERROR);
468 			}
469 			iteration++;
470 		}
471 
472 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
473 		    "==> hxge_rxbuf_pp_to_vp: (search done)"
474 		    "buf_pp $%p btype %d anchor_index %d",
475 		    pkt_buf_addr_pp, pktbufsz_type, anchor_index));
476 	}
477 
478 	if (found == B_FALSE) {
479 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
480 		    "==> hxge_rxbuf_pp_to_vp: (search failed)"
481 		    "buf_pp $%p btype %d anchor_index %d",
482 		    pkt_buf_addr_pp, pktbufsz_type, anchor_index));
483 		return (HXGE_ERROR);
484 	}
485 
486 found_index:
487 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
488 	    "==> hxge_rxbuf_pp_to_vp: (FOUND1)"
489 	    "buf_pp $%p btype %d bufsize %d anchor_index %d",
490 	    pkt_buf_addr_pp, pktbufsz_type, bufsize, anchor_index));
491 
492 	/* index of the first block in this chunk */
493 	chunk_index = bufinfo[anchor_index].start_index;
494 	dvma_addr = bufinfo[anchor_index].dvma_addr;
495 	page_size_mask = ring_info->block_size_mask;
496 
497 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
498 	    "==> hxge_rxbuf_pp_to_vp: (FOUND3, get chunk) "
499 	    "buf_pp $%p btype %d bufsize %d "
500 	    "anchor_index %d chunk_index %d dvma $%p",
501 	    pkt_buf_addr_pp, pktbufsz_type, bufsize,
502 	    anchor_index, chunk_index, dvma_addr));
503 
504 	offset = pktbuf_pp - dvma_addr;	/* offset within the chunk */
505 	block_size = rbr_p->block_size;	/* System block (page) size */
506 
507 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
508 	    "==> hxge_rxbuf_pp_to_vp: (FOUND4, get chunk) "
509 	    "buf_pp $%p btype %d bufsize %d "
510 	    "anchor_index %d chunk_index %d dvma $%p "
511 	    "offset %d block_size %d",
512 	    pkt_buf_addr_pp, pktbufsz_type, bufsize, anchor_index,
513 	    chunk_index, dvma_addr, offset, block_size));
514 	HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> getting total index"));
515 
516 	block_index = (offset / block_size);	/* index within chunk */
517 	total_index = chunk_index + block_index;
518 
519 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
520 	    "==> hxge_rxbuf_pp_to_vp: "
521 	    "total_index %d dvma_addr $%p "
522 	    "offset %d block_size %d "
523 	    "block_index %d ",
524 	    total_index, dvma_addr, offset, block_size, block_index));
525 
526 	*pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr +
527 	    offset);
528 
529 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
530 	    "==> hxge_rxbuf_pp_to_vp: "
531 	    "total_index %d dvma_addr $%p "
532 	    "offset %d block_size %d "
533 	    "block_index %d "
534 	    "*pkt_buf_addr_p $%p",
535 	    total_index, dvma_addr, offset, block_size,
536 	    block_index, *pkt_buf_addr_p));
537 
538 	*msg_index = total_index;
539 	*bufoffset = (offset & page_size_mask);
540 
541 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
542 	    "==> hxge_rxbuf_pp_to_vp: get msg index: "
543 	    "msg_index %d bufoffset_index %d",
544 	    *msg_index, *bufoffset));
545 	HXGE_DEBUG_MSG((hxgep, RX2_CTL, "<== hxge_rxbuf_pp_to_vp"));
546 
547 	return (HXGE_OK);
548 }
549 
550 
551 /*
552  * Comparison function passed to hxge_ksort()
553  * to order chunks by DVMA address.
554  */
555 static int
556 hxge_sort_compare(const void *p1, const void *p2)
557 {
558 
559 	rxbuf_index_info_t *a, *b;
560 
561 	a = (rxbuf_index_info_t *)p1;
562 	b = (rxbuf_index_info_t *)p2;
563 
564 	if (a->dvma_addr > b->dvma_addr)
565 		return (1);
566 	if (a->dvma_addr < b->dvma_addr)
567 		return (-1);
568 	return (0);
569 }
570 
571 /*
572  * Grabbed this sort implementation from common/syscall/avl.c
573  *
574  * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified.
575  * v = Ptr to array/vector of objs
576  * n = # objs in the array
577  * s = size of each obj (must be a multiple of the word size)
578  * f = ptr to function to compare two objs
579  *	returns (-1 = less than, 0 = equal, 1 = greater than)
580  */
581 void
582 hxge_ksort(caddr_t v, int n, int s, int (*f) ())
583 {
584 	int		g, i, j, ii;
585 	unsigned int	*p1, *p2;
586 	unsigned int	tmp;
587 
588 	/* No work to do */
589 	if (v == NULL || n <= 1)
590 		return;
591 	/* Sanity check on arguments */
592 	ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0);
593 	ASSERT(s > 0);
594 
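	/*
	 * Classic shellsort: compare elements that are g apart and halve the
	 * gap each pass. Out-of-order elements are swapped one 32-bit word
	 * at a time, which is why s must be a multiple of the word size.
	 */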
595 	for (g = n / 2; g > 0; g /= 2) {
596 		for (i = g; i < n; i++) {
597 			for (j = i - g; j >= 0 &&
598 			    (*f) (v + j * s, v + (j + g) * s) == 1; j -= g) {
599 				p1 = (unsigned *)(v + j * s);
600 				p2 = (unsigned *)(v + (j + g) * s);
601 				for (ii = 0; ii < s / 4; ii++) {
602 					tmp = *p1;
603 					*p1++ = *p2;
604 					*p2++ = tmp;
605 				}
606 			}
607 		}
608 	}
609 }
610 
611 /*
612  * Initialize data structures required for rxdma
613  * buffer dvma->vmem address lookup
614  */
615 /*ARGSUSED*/
616 static hxge_status_t
617 hxge_rxbuf_index_info_init(p_hxge_t hxgep, p_rx_rbr_ring_t rbrp)
618 {
619 	int		index;
620 	rxring_info_t	*ring_info;
621 	int		max_iteration = 0, max_index = 0;
622 
623 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_rxbuf_index_info_init"));
624 
625 	ring_info = rbrp->ring_info;
626 	ring_info->hint[0] = NO_HINT;
627 	ring_info->hint[1] = NO_HINT;
628 	ring_info->hint[2] = NO_HINT;
629 	max_index = rbrp->num_blocks;
630 
631 	/* read the DVMA address information and sort it */
632 	/* do init of the information array */
633 
634 	HXGE_DEBUG_MSG((hxgep, DMA2_CTL,
635 	    " hxge_rxbuf_index_info_init Sort ptrs"));
636 
637 	/* sort the array */
638 	hxge_ksort((void *) ring_info->buffer, max_index,
639 	    sizeof (rxbuf_index_info_t), hxge_sort_compare);
640 
641 	for (index = 0; index < max_index; index++) {
642 		HXGE_DEBUG_MSG((hxgep, DMA2_CTL,
643 		    " hxge_rxbuf_index_info_init: sorted chunk %d "
644 		    " ioaddr $%p kaddr $%p size %x",
645 		    index, ring_info->buffer[index].dvma_addr,
646 		    ring_info->buffer[index].kaddr,
647 		    ring_info->buffer[index].buf_size));
648 	}
649 
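	/*
	 * max_iterations bounds the binary search in hxge_rxbuf_pp_to_vp():
	 * it is the smallest m such that 2^m > num_blocks, plus one step
	 * of slack.
	 */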
650 	max_iteration = 0;
651 	while (max_index >= (1ULL << max_iteration))
652 		max_iteration++;
653 	ring_info->max_iterations = max_iteration + 1;
654 
655 	HXGE_DEBUG_MSG((hxgep, DMA2_CTL,
656 	    " hxge_rxbuf_index_info_init Find max iter %d",
657 	    ring_info->max_iterations));
658 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_rxbuf_index_info_init"));
659 
660 	return (HXGE_OK);
661 }
662 
663 /*ARGSUSED*/
664 void
665 hxge_dump_rcr_entry(p_hxge_t hxgep, p_rcr_entry_t entry_p)
666 {
667 #ifdef	HXGE_DEBUG
668 
669 	uint32_t bptr;
670 	uint64_t pp;
671 
672 	bptr = entry_p->bits.pkt_buf_addr;
673 
674 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
675 	    "\trcr entry $%p "
676 	    "\trcr entry 0x%0llx "
677 	    "\trcr entry 0x%08x "
678 	    "\trcr entry 0x%08x "
679 	    "\tvalue 0x%0llx\n"
680 	    "\tmulti = %d\n"
681 	    "\tpkt_type = 0x%x\n"
682 	    "\terror = 0x%04x\n"
683 	    "\tl2_len = %d\n"
684 	    "\tpktbufsize = %d\n"
685 	    "\tpkt_buf_addr = $%p\n"
686 	    "\tpkt_buf_addr (<< 6) = $%p\n",
687 	    entry_p,
688 	    *(int64_t *)entry_p,
689 	    *(int32_t *)entry_p,
690 	    *(int32_t *)((char *)entry_p + 32),
691 	    entry_p->value,
692 	    entry_p->bits.multi,
693 	    entry_p->bits.pkt_type,
694 	    entry_p->bits.error,
695 	    entry_p->bits.l2_len,
696 	    entry_p->bits.pktbufsz,
697 	    bptr,
698 	    entry_p->bits.pkt_buf_addr));
699 
700 	pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) <<
701 	    RCR_PKT_BUF_ADDR_SHIFT;
702 
703 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "rcr pp 0x%llx l2 len %d",
704 	    pp, (*(int64_t *)entry_p >> 40) & 0x3fff));
705 #endif
706 }
707 
708 /*ARGSUSED*/
709 void
710 hxge_rxdma_stop(p_hxge_t hxgep)
711 {
712 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop"));
713 
714 	(void) hxge_rx_vmac_disable(hxgep);
715 	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);
716 
717 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop"));
718 }
719 
720 void
721 hxge_rxdma_stop_reinit(p_hxge_t hxgep)
722 {
723 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_reinit"));
724 
725 	(void) hxge_rxdma_stop(hxgep);
726 	(void) hxge_uninit_rxdma_channels(hxgep);
727 	(void) hxge_init_rxdma_channels(hxgep);
728 
729 	(void) hxge_rx_vmac_enable(hxgep);
730 
731 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop_reinit"));
732 }
733 
734 hxge_status_t
735 hxge_rxdma_hw_mode(p_hxge_t hxgep, boolean_t enable)
736 {
737 	int			i, ndmas;
738 	uint16_t		channel;
739 	p_rx_rbr_rings_t	rx_rbr_rings;
740 	p_rx_rbr_ring_t		*rbr_rings;
741 	hpi_handle_t		handle;
742 	hpi_status_t		rs = HPI_SUCCESS;
743 	hxge_status_t		status = HXGE_OK;
744 
745 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
746 	    "==> hxge_rxdma_hw_mode: mode %d", enable));
747 
748 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
749 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
750 		    "<== hxge_rxdma_mode: not initialized"));
751 		return (HXGE_ERROR);
752 	}
753 
754 	rx_rbr_rings = hxgep->rx_rbr_rings;
755 	if (rx_rbr_rings == NULL) {
756 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
757 		    "<== hxge_rxdma_mode: NULL ring pointer"));
758 		return (HXGE_ERROR);
759 	}
760 
761 	if (rx_rbr_rings->rbr_rings == NULL) {
762 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
763 		    "<== hxge_rxdma_mode: NULL rbr rings pointer"));
764 		return (HXGE_ERROR);
765 	}
766 
767 	ndmas = rx_rbr_rings->ndmas;
768 	if (!ndmas) {
769 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
770 		    "<== hxge_rxdma_mode: no channel"));
771 		return (HXGE_ERROR);
772 	}
773 
774 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
775 	    "==> hxge_rxdma_mode (ndmas %d)", ndmas));
776 
777 	rbr_rings = rx_rbr_rings->rbr_rings;
778 
779 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
780 
781 	for (i = 0; i < ndmas; i++) {
782 		if (rbr_rings == NULL || rbr_rings[i] == NULL) {
783 			continue;
784 		}
785 		channel = rbr_rings[i]->rdc;
786 		if (enable) {
787 			HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
788 			    "==> hxge_rxdma_hw_mode: channel %d (enable)",
789 			    channel));
790 			rs = hpi_rxdma_cfg_rdc_enable(handle, channel);
791 		} else {
792 			HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
793 			    "==> hxge_rxdma_hw_mode: channel %d (disable)",
794 			    channel));
795 			rs = hpi_rxdma_cfg_rdc_disable(handle, channel);
796 		}
797 	}
798 
799 	status = ((rs == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR | rs);
800 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
801 	    "<== hxge_rxdma_hw_mode: status 0x%x", status));
802 
803 	return (status);
804 }
805 
806 int
807 hxge_rxdma_get_ring_index(p_hxge_t hxgep, uint16_t channel)
808 {
809 	int			i, ndmas;
810 	uint16_t		rdc;
811 	p_rx_rbr_rings_t 	rx_rbr_rings;
812 	p_rx_rbr_ring_t		*rbr_rings;
813 
814 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
815 	    "==> hxge_rxdma_get_ring_index: channel %d", channel));
816 
817 	rx_rbr_rings = hxgep->rx_rbr_rings;
818 	if (rx_rbr_rings == NULL) {
819 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
820 		    "<== hxge_rxdma_get_ring_index: NULL ring pointer"));
821 		return (-1);
822 	}
823 
824 	ndmas = rx_rbr_rings->ndmas;
825 	if (!ndmas) {
826 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
827 		    "<== hxge_rxdma_get_ring_index: no channel"));
828 		return (-1);
829 	}
830 
831 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
832 	    "==> hxge_rxdma_get_ring_index (ndmas %d)", ndmas));
833 
834 	rbr_rings = rx_rbr_rings->rbr_rings;
835 	for (i = 0; i < ndmas; i++) {
836 		rdc = rbr_rings[i]->rdc;
837 		if (channel == rdc) {
838 			HXGE_DEBUG_MSG((hxgep, RX_CTL,
839 			    "==> hxge_rxdma_get_rbr_ring: "
840 			    "channel %d (index %d) "
841 			    "ring %d", channel, i, rbr_rings[i]));
842 
843 			return (i);
844 		}
845 	}
846 
847 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
848 	    "<== hxge_rxdma_get_rbr_ring_index: not found"));
849 
850 	return (-1);
851 }
852 
853 /*
854  * Static functions start here.
855  */
856 static p_rx_msg_t
857 hxge_allocb(size_t size, uint32_t pri, p_hxge_dma_common_t dmabuf_p)
858 {
859 	p_rx_msg_t		hxge_mp = NULL;
860 	p_hxge_dma_common_t	dmamsg_p;
861 	uchar_t			*buffer;
862 
863 	hxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP);
864 	if (hxge_mp == NULL) {
865 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
866 		    "Allocation of a rx msg failed."));
867 		goto hxge_allocb_exit;
868 	}
869 
870 	hxge_mp->use_buf_pool = B_FALSE;
871 	if (dmabuf_p) {
872 		hxge_mp->use_buf_pool = B_TRUE;
873 
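		/*
		 * Carve this message's buffer off the front of the shared
		 * DMA chunk: copy the chunk descriptor into buf_dma, then
		 * advance the chunk's kernel address, I/O address, DMA
		 * cookie and offset past the "size" bytes just consumed.
		 */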
874 		dmamsg_p = (p_hxge_dma_common_t)&hxge_mp->buf_dma;
875 		*dmamsg_p = *dmabuf_p;
876 		dmamsg_p->nblocks = 1;
877 		dmamsg_p->block_size = size;
878 		dmamsg_p->alength = size;
879 		buffer = (uchar_t *)dmabuf_p->kaddrp;
880 
881 		dmabuf_p->kaddrp = (void *)((char *)dmabuf_p->kaddrp + size);
882 		dmabuf_p->ioaddr_pp = (void *)
883 		    ((char *)dmabuf_p->ioaddr_pp + size);
884 
885 		dmabuf_p->alength -= size;
886 		dmabuf_p->offset += size;
887 		dmabuf_p->dma_cookie.dmac_laddress += size;
888 		dmabuf_p->dma_cookie.dmac_size -= size;
889 	} else {
890 		buffer = KMEM_ALLOC(size, KM_NOSLEEP);
891 		if (buffer == NULL) {
892 			HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
893 			    "Allocation of a receive page failed."));
894 			goto hxge_allocb_fail1;
895 		}
896 	}
897 
898 	hxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &hxge_mp->freeb);
899 	if (hxge_mp->rx_mblk_p == NULL) {
900 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "desballoc failed."));
901 		goto hxge_allocb_fail2;
902 	}
903 	hxge_mp->buffer = buffer;
904 	hxge_mp->block_size = size;
905 	hxge_mp->freeb.free_func = (void (*) ()) hxge_freeb;
906 	hxge_mp->freeb.free_arg = (caddr_t)hxge_mp;
907 	hxge_mp->ref_cnt = 1;
908 	hxge_mp->free = B_TRUE;
909 	hxge_mp->rx_use_bcopy = B_FALSE;
910 
911 	atomic_add_32(&hxge_mblks_pending, 1);
912 
913 	goto hxge_allocb_exit;
914 
915 hxge_allocb_fail2:
916 	if (!hxge_mp->use_buf_pool) {
917 		KMEM_FREE(buffer, size);
918 	}
919 hxge_allocb_fail1:
920 	KMEM_FREE(hxge_mp, sizeof (rx_msg_t));
921 	hxge_mp = NULL;
922 
923 hxge_allocb_exit:
924 	return (hxge_mp);
925 }
926 
927 p_mblk_t
928 hxge_dupb(p_rx_msg_t hxge_mp, uint_t offset, size_t size)
929 {
930 	p_mblk_t mp;
931 
932 	HXGE_DEBUG_MSG((NULL, MEM_CTL, "==> hxge_dupb"));
933 	HXGE_DEBUG_MSG((NULL, MEM_CTL, "hxge_mp = $%p "
934 	    "offset = 0x%08X " "size = 0x%08X", hxge_mp, offset, size));
935 
936 	mp = desballoc(&hxge_mp->buffer[offset], size, 0, &hxge_mp->freeb);
937 	if (mp == NULL) {
938 		HXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
939 		goto hxge_dupb_exit;
940 	}
941 
942 	atomic_inc_32(&hxge_mp->ref_cnt);
943 	atomic_inc_32(&hxge_mblks_pending);
944 
945 hxge_dupb_exit:
946 	HXGE_DEBUG_MSG((NULL, MEM_CTL, "<== hxge_dupb mp = $%p", hxge_mp));
947 	return (mp);
948 }
949 
950 p_mblk_t
951 hxge_dupb_bcopy(p_rx_msg_t hxge_mp, uint_t offset, size_t size)
952 {
953 	p_mblk_t	mp;
954 	uchar_t		*dp;
955 
956 	mp = allocb(size + HXGE_RXBUF_EXTRA, 0);
957 	if (mp == NULL) {
958 		HXGE_DEBUG_MSG((NULL, RX_CTL, "allocb failed"));
959 		goto hxge_dupb_bcopy_exit;
960 	}
961 	dp = mp->b_rptr = mp->b_rptr + HXGE_RXBUF_EXTRA;
962 	bcopy((void *) &hxge_mp->buffer[offset], dp, size);
963 	mp->b_wptr = dp + size;
964 
965 hxge_dupb_bcopy_exit:
966 
967 	HXGE_DEBUG_MSG((NULL, MEM_CTL, "<== hxge_dupb mp = $%p", hxge_mp));
968 
969 	return (mp);
970 }
971 
972 void hxge_post_page(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p,
973     p_rx_msg_t rx_msg_p);
974 
975 void
976 hxge_post_page(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p)
977 {
978 	hpi_handle_t handle;
979 
980 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_post_page"));
981 
982 	/* Reuse this buffer */
983 	rx_msg_p->free = B_FALSE;
984 	rx_msg_p->cur_usage_cnt = 0;
985 	rx_msg_p->max_usage_cnt = 0;
986 	rx_msg_p->pkt_buf_size = 0;
987 
988 	if (rx_rbr_p->rbr_use_bcopy) {
989 		rx_msg_p->rx_use_bcopy = B_FALSE;
990 		atomic_dec_32(&rx_rbr_p->rbr_consumed);
991 	}
992 
993 	/*
994 	 * Advance the RBR write index and post this buffer's shifted address.
995 	 */
996 	MUTEX_ENTER(&rx_rbr_p->post_lock);
997 
998 	rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) &
999 	    rx_rbr_p->rbr_wrap_mask);
1000 	rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr;
1001 
1002 	/*
1003 	 * Don't kick the hardware when the write index is close to 0 or near
1004 	 * the max, to reduce the number of rbr_empty errors.
1005 	 */
1006 	rx_rbr_p->pages_to_post++;
1007 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
1008 	if (rx_rbr_p->rbr_wr_index > (rx_rbr_p->pages_to_skip / 2) &&
1009 	    rx_rbr_p->rbr_wr_index < rx_rbr_p->pages_to_post_threshold) {
1010 		hpi_rxdma_rdc_rbr_kick(handle, rx_rbr_p->rdc,
1011 		    rx_rbr_p->pages_to_post);
1012 		rx_rbr_p->pages_to_post = 0;
1013 	}
1014 
1015 	MUTEX_EXIT(&rx_rbr_p->post_lock);
1016 
1017 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
1018 	    "<== hxge_post_page (channel %d post_next_index %d)",
1019 	    rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index));
1020 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_post_page"));
1021 }
1022 
1023 void
1024 hxge_freeb(p_rx_msg_t rx_msg_p)
1025 {
1026 	size_t		size;
1027 	uchar_t		*buffer = NULL;
1028 	int		ref_cnt;
1029 	boolean_t	free_state = B_FALSE;
1030 	rx_rbr_ring_t	*ring = rx_msg_p->rx_rbr_p;
1031 
1032 	HXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> hxge_freeb"));
1033 	HXGE_DEBUG_MSG((NULL, MEM2_CTL,
1034 	    "hxge_freeb:rx_msg_p = $%p (block pending %d)",
1035 	    rx_msg_p, hxge_mblks_pending));
1036 
1037 	atomic_dec_32(&hxge_mblks_pending);
1038 
1039 	/*
1040 	 * First we need to get the free state, then
1041 	 * atomically decrement the reference count to prevent
1042 	 * a race condition with the interrupt thread that
1043 	 * is processing a loaned-up buffer block.
1044 	 */
1045 	free_state = rx_msg_p->free;
1046 
1047 	ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1);
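	/*
	 * A reference count of zero means this was the last reference:
	 * free the buffer and, if the ring has already been unmapped,
	 * the ring itself.
	 */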
1048 	if (!ref_cnt) {
1049 		buffer = rx_msg_p->buffer;
1050 		size = rx_msg_p->block_size;
1051 
1052 		HXGE_DEBUG_MSG((NULL, MEM2_CTL, "hxge_freeb: "
1053 		    "will free: rx_msg_p = $%p (block pending %d)",
1054 		    rx_msg_p, hxge_mblks_pending));
1055 
1056 		if (!rx_msg_p->use_buf_pool) {
1057 			KMEM_FREE(buffer, size);
1058 		}
1059 
1060 		KMEM_FREE(rx_msg_p, sizeof (rx_msg_t));
1061 		/* Decrement the receive buffer ring's reference count, too. */
1062 		atomic_dec_32(&ring->rbr_ref_cnt);
1063 
1064 		/*
1065 		 * Free the receive buffer ring itself iff
1066 		 * 1. all the receive buffers have been freed, and
1067 		 * 2. the ring is in the proper state (that is, it has
1068 		 *    already been unmapped: RBR_UNMAPPED).
1069 		 */
1070 		if (ring->rbr_ref_cnt == 0 && ring->rbr_state == RBR_UNMAPPED) {
1071 			KMEM_FREE(ring, sizeof (*ring));
1072 		}
1073 		return;
1074 	}
1075 
1076 	/*
1077 	 * Repost buffer.
1078 	 */
1079 	if (free_state && (ref_cnt == 1)) {
1080 		HXGE_DEBUG_MSG((NULL, RX_CTL,
1081 		    "hxge_freeb: post page $%p:", rx_msg_p));
1082 		if (ring->rbr_state == RBR_POSTING)
1083 			hxge_post_page(rx_msg_p->hxgep, ring, rx_msg_p);
1084 	}
1085 
1086 	HXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== hxge_freeb"));
1087 }
1088 
1089 uint_t
1090 hxge_rx_intr(caddr_t arg1, caddr_t arg2)
1091 {
1092 	p_hxge_ldv_t		ldvp = (p_hxge_ldv_t)arg1;
1093 	p_hxge_t		hxgep = (p_hxge_t)arg2;
1094 	p_hxge_ldg_t		ldgp;
1095 	uint8_t			channel;
1096 	hpi_handle_t		handle;
1097 	rdc_stat_t		cs;
1098 	uint_t			serviced = DDI_INTR_UNCLAIMED;
1099 
1100 	if (ldvp == NULL) {
1101 		HXGE_DEBUG_MSG((NULL, RX_INT_CTL,
1102 		    "<== hxge_rx_intr: arg2 $%p arg1 $%p", hxgep, ldvp));
1103 		return (DDI_INTR_CLAIMED);
1104 	}
1105 
1106 	if (arg2 == NULL || (void *) ldvp->hxgep != arg2) {
1107 		hxgep = ldvp->hxgep;
1108 	}
1109 
1110 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1111 	    "==> hxge_rx_intr: arg2 $%p arg1 $%p", hxgep, ldvp));
1112 
1113 	/*
1114 	 * This interrupt handler is for a specific receive dma channel.
1115 	 */
1116 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
1117 
1118 	/*
1119 	 * Get the control and status for this channel.
1120 	 */
1121 	channel = ldvp->channel;
1122 	ldgp = ldvp->ldgp;
1123 	RXDMA_REG_READ64(handle, RDC_STAT, channel, &cs.value);
1124 
1125 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_intr:channel %d "
1126 	    "cs 0x%016llx rcrto 0x%x rcrthres %x",
1127 	    channel, cs.value, cs.bits.rcr_to, cs.bits.rcr_thres));
1128 
1129 	hxge_rx_pkts_vring(hxgep, ldvp->vdma_index, ldvp, cs);
1130 	serviced = DDI_INTR_CLAIMED;
1131 
1132 	/* error events. */
1133 	if (cs.value & RDC_STAT_ERROR) {
1134 		(void) hxge_rx_err_evnts(hxgep, ldvp->vdma_index, ldvp, cs);
1135 	}
1136 
1137 hxge_intr_exit:
1138 	/*
1139 	 * Enable the mailbox update interrupt if we want to use the mailbox.
1140 	 * We probably don't need the mailbox as it only saves us one PIO
1141 	 * read. Also write 1 to rcrthres and rcrto to clear these two
1142 	 * edge-triggered bits.
1143 	 */
1144 	cs.value &= RDC_STAT_WR1C;
1145 	cs.bits.mex = 1;
1146 	RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value);
1147 
1148 	/*
1149 	 * Rearm this logical group if this is a single device group.
1150 	 */
1151 	if (ldgp->nldvs == 1) {
1152 		ld_intr_mgmt_t mgm;
1153 
1154 		mgm.value = 0;
1155 		mgm.bits.arm = 1;
1156 		mgm.bits.timer = ldgp->ldg_timer;
1157 		HXGE_REG_WR32(handle,
1158 		    LD_INTR_MGMT + LDSV_OFFSET(ldgp->ldg), mgm.value);
1159 	}
1160 
1161 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1162 	    "<== hxge_rx_intr: serviced %d", serviced));
1163 
1164 	return (serviced);
1165 }
1166 
1167 static void
1168 hxge_rx_pkts_vring(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp,
1169     rdc_stat_t cs)
1170 {
1171 	p_mblk_t		mp;
1172 	p_rx_rcr_ring_t		rcrp;
1173 
1174 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts_vring"));
1175 	if ((mp = hxge_rx_pkts(hxgep, vindex, ldvp, &rcrp, cs)) == NULL) {
1176 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1177 		    "<== hxge_rx_pkts_vring: no mp"));
1178 		return;
1179 	}
1180 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_pkts_vring: $%p", mp));
1181 
1182 #ifdef  HXGE_DEBUG
1183 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
1184 	    "==> hxge_rx_pkts_vring:calling mac_rx (NEMO) "
1185 	    "LEN %d mp $%p mp->b_next $%p rcrp $%p "
1186 	    "mac_handle $%p",
1187 	    (mp->b_wptr - mp->b_rptr), mp, mp->b_next,
1188 	    rcrp, rcrp->rcr_mac_handle));
1189 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
1190 	    "==> hxge_rx_pkts_vring: dump packets "
1191 	    "(mp $%p b_rptr $%p b_wptr $%p):\n %s",
1192 	    mp, mp->b_rptr, mp->b_wptr,
1193 	    hxge_dump_packet((char *)mp->b_rptr, 64)));
1194 
1195 	if (mp->b_cont) {
1196 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
1197 		    "==> hxge_rx_pkts_vring: dump b_cont packets "
1198 		    "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s",
1199 		    mp->b_cont, mp->b_cont->b_rptr, mp->b_cont->b_wptr,
1200 		    hxge_dump_packet((char *)mp->b_cont->b_rptr,
1201 		    mp->b_cont->b_wptr - mp->b_cont->b_rptr)));
1202 		}
1203 	if (mp->b_next) {
1204 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
1205 		    "==> hxge_rx_pkts_vring: dump next packets "
1206 		    "(b_rptr $%p): %s",
1207 		    mp->b_next->b_rptr,
1208 		    hxge_dump_packet((char *)mp->b_next->b_rptr, 64)));
1209 	}
1210 #endif
1211 
1212 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
1213 	    "==> hxge_rx_pkts_vring: send packet to stack"));
1214 	mac_rx(hxgep->mach, rcrp->rcr_mac_handle, mp);
1215 
1216 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rx_pkts_vring"));
1217 }
1218 
1219 /*ARGSUSED*/
1220 mblk_t *
1221 hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp,
1222     p_rx_rcr_ring_t *rcrp, rdc_stat_t cs)
1223 {
1224 	hpi_handle_t		handle;
1225 	uint8_t			channel;
1226 	p_rx_rcr_rings_t	rx_rcr_rings;
1227 	p_rx_rcr_ring_t		rcr_p;
1228 	uint32_t		comp_rd_index;
1229 	p_rcr_entry_t		rcr_desc_rd_head_p;
1230 	p_rcr_entry_t		rcr_desc_rd_head_pp;
1231 	p_mblk_t		nmp, mp_cont, head_mp, *tail_mp;
1232 	uint16_t		qlen, nrcr_read, npkt_read;
1233 	uint32_t		qlen_hw;
1234 	boolean_t		multi;
1235 	rdc_rcr_cfg_b_t		rcr_cfg_b;
1236 #if defined(_BIG_ENDIAN)
1237 	hpi_status_t		rs = HPI_SUCCESS;
1238 #endif
1239 
1240 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts:vindex %d "
1241 	    "channel %d", vindex, ldvp->channel));
1242 
1243 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
1244 		return (NULL);
1245 	}
1246 
1247 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
1248 	rx_rcr_rings = hxgep->rx_rcr_rings;
1249 	rcr_p = rx_rcr_rings->rcr_rings[vindex];
1250 	channel = rcr_p->rdc;
1251 	if (channel != ldvp->channel) {
1252 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts:index %d "
1253 		    "channel %d, and rcr channel %d not matched.",
1254 		    vindex, ldvp->channel, channel));
1255 		return (NULL);
1256 	}
1257 
1258 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1259 	    "==> hxge_rx_pkts: START: rcr channel %d "
1260 	    "head_p $%p head_pp $%p  index %d ",
1261 	    channel, rcr_p->rcr_desc_rd_head_p,
1262 	    rcr_p->rcr_desc_rd_head_pp, rcr_p->comp_rd_index));
1263 
1264 #if !defined(_BIG_ENDIAN)
1265 	qlen = RXDMA_REG_READ32(handle, RDC_RCR_QLEN, channel) & 0xffff;
1266 #else
1267 	rs = hpi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
1268 	if (rs != HPI_SUCCESS) {
1269 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts:index %d "
1270 		    "channel %d, get qlen failed 0x%08x",
1271 		    vindex, ldvp->channel, rs));
1272 		return (NULL);
1273 	}
1274 #endif
1275 	if (!qlen) {
1276 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1277 		    "<== hxge_rx_pkts:rcr channel %d qlen %d (no pkts)",
1278 		    channel, qlen));
1279 		return (NULL);
1280 	}
1281 
1282 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_pkts:rcr channel %d "
1283 	    "qlen %d", channel, qlen));
1284 
1285 	comp_rd_index = rcr_p->comp_rd_index;
1286 
1287 	rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p;
1288 	rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp;
1289 	nrcr_read = npkt_read = 0;
1290 
1291 	/*
1292 	 * Number of packets queued. A jumbo or multi-buffer packet is counted
1293 	 * as only one packet even though it may take up more than one
1294 	 * completion entry. Capped at hxge_max_rx_pkts to bound ISR time.
1295 	 */
1296 	qlen_hw = (qlen < hxge_max_rx_pkts) ? qlen : hxge_max_rx_pkts;
1297 	head_mp = NULL;
1298 	tail_mp = &head_mp;
1299 	nmp = mp_cont = NULL;
1300 	multi = B_FALSE;
1301 
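	/*
	 * Walk the completion ring: each iteration consumes one RCR entry.
	 * A packet may span several entries; only the entry that completes
	 * a packet has its multi bit clear, and that is when the packet
	 * count is advanced.
	 */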
1302 	while (qlen_hw) {
1303 #ifdef HXGE_DEBUG
1304 		hxge_dump_rcr_entry(hxgep, rcr_desc_rd_head_p);
1305 #endif
1306 		/*
1307 		 * Process one completion ring entry.
1308 		 */
1309 		hxge_receive_packet(hxgep,
1310 		    rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont);
1311 
1312 		/*
1313 		 * Message chaining: b_next links packets, b_cont links segments.
1314 		 */
1315 		if (nmp) {
1316 			nmp->b_next = NULL;
1317 			if (!multi && !mp_cont) { /* frame fits a partition */
1318 				*tail_mp = nmp;
1319 				tail_mp = &nmp->b_next;
1320 				nmp = NULL;
1321 			} else if (multi && !mp_cont) { /* first segment */
1322 				*tail_mp = nmp;
1323 				tail_mp = &nmp->b_cont;
1324 			} else if (multi && mp_cont) {	/* mid of multi segs */
1325 				*tail_mp = mp_cont;
1326 				tail_mp = &mp_cont->b_cont;
1327 			} else if (!multi && mp_cont) { /* last segment */
1328 				*tail_mp = mp_cont;
1329 				tail_mp = &nmp->b_next;
1330 				nmp = NULL;
1331 			}
1332 		}
1333 
1334 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1335 		    "==> hxge_rx_pkts: loop: rcr channel %d "
1336 		    "before updating: multi %d "
1337 		    "nrcr_read %d "
1338 		    "npk read %d "
1339 		    "head_pp $%p  index %d ",
1340 		    channel, multi,
1341 		    nrcr_read, npkt_read, rcr_desc_rd_head_pp, comp_rd_index));
1342 
1343 		if (!multi) {
1344 			qlen_hw--;
1345 			npkt_read++;
1346 		}
1347 
1348 		/*
1349 		 * Update the next read entry.
1350 		 */
1351 		comp_rd_index = NEXT_ENTRY(comp_rd_index,
1352 		    rcr_p->comp_wrap_mask);
1353 
1354 		rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
1355 		    rcr_p->rcr_desc_first_p, rcr_p->rcr_desc_last_p);
1356 
1357 		nrcr_read++;
1358 
1359 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1360 		    "<== hxge_rx_pkts: (SAM, process one packet) "
1361 		    "nrcr_read %d", nrcr_read));
1362 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1363 		    "==> hxge_rx_pkts: loop: rcr channel %d "
1364 		    "multi %d nrcr_read %d npk read %d head_pp $%p  index %d ",
1365 		    channel, multi, nrcr_read, npkt_read, rcr_desc_rd_head_pp,
1366 		    comp_rd_index));
1367 	}
1368 
1369 	rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp;
1370 	rcr_p->comp_rd_index = comp_rd_index;
1371 	rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p;
1372 
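	/*
	 * If the interrupt timeout or packet-threshold tunables have
	 * changed, push the new values to this channel's RDC_RCR_CFG_B
	 * register.
	 */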
1373 	if ((hxgep->intr_timeout != rcr_p->intr_timeout) ||
1374 	    (hxgep->intr_threshold != rcr_p->intr_threshold)) {
1375 		rcr_p->intr_timeout = hxgep->intr_timeout;
1376 		rcr_p->intr_threshold = hxgep->intr_threshold;
1377 		rcr_cfg_b.value = 0x0ULL;
1378 		if (rcr_p->intr_timeout)
1379 			rcr_cfg_b.bits.entout = 1;
1380 		rcr_cfg_b.bits.timeout = rcr_p->intr_timeout;
1381 		rcr_cfg_b.bits.pthres = rcr_p->intr_threshold;
1382 		RXDMA_REG_WRITE64(handle, RDC_RCR_CFG_B,
1383 		    channel, rcr_cfg_b.value);
1384 	}
1385 
1386 	cs.bits.pktread = npkt_read;
1387 	cs.bits.ptrread = nrcr_read;
1388 	RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value);
1389 
1390 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1391 	    "==> hxge_rx_pkts: EXIT: rcr channel %d "
1392 	    "head_pp $%p  index %016llx ",
1393 	    channel, rcr_p->rcr_desc_rd_head_pp, rcr_p->comp_rd_index));
1394 
1395 	/*
1396 	 * Update RCR buffer pointer read and number of packets read.
1397 	 */
1398 
1399 	*rcrp = rcr_p;
1400 
1401 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "<== hxge_rx_pkts"));
1402 
1403 	return (head_mp);
1404 }
1405 
1406 /*ARGSUSED*/
1407 void
1408 hxge_receive_packet(p_hxge_t hxgep,
1409     p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p,
1410     boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont)
1411 {
1412 	p_mblk_t		nmp = NULL;
1413 	uint64_t		multi;
1414 	uint8_t			channel;
1415 
1416 	boolean_t first_entry = B_TRUE;
1417 	boolean_t is_tcp_udp = B_FALSE;
1418 	boolean_t buffer_free = B_FALSE;
1419 	boolean_t error_send_up = B_FALSE;
1420 	uint8_t error_type;
1421 	uint16_t l2_len;
1422 	uint16_t skip_len;
1423 	uint8_t pktbufsz_type;
1424 	uint64_t rcr_entry;
1425 	uint64_t *pkt_buf_addr_pp;
1426 	uint64_t *pkt_buf_addr_p;
1427 	uint32_t buf_offset;
1428 	uint32_t bsize;
1429 	uint32_t msg_index;
1430 	p_rx_rbr_ring_t rx_rbr_p;
1431 	p_rx_msg_t *rx_msg_ring_p;
1432 	p_rx_msg_t rx_msg_p;
1433 
1434 	uint16_t sw_offset_bytes = 0, hdr_size = 0;
1435 	hxge_status_t status = HXGE_OK;
1436 	boolean_t is_valid = B_FALSE;
1437 	p_hxge_rx_ring_stats_t rdc_stats;
1438 	uint32_t bytes_read;
1439 
1440 	uint64_t pkt_type;
1441 
1442 	HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> hxge_receive_packet"));
1443 
1444 	first_entry = (*mp == NULL) ? B_TRUE : B_FALSE;
1445 	rcr_entry = *((uint64_t *)rcr_desc_rd_head_p);
1446 
1447 	multi = (rcr_entry & RCR_MULTI_MASK);
1448 	pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK);
1449 
1450 	error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT);
1451 	l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT);
1452 
1453 	/*
1454 	 * Hardware does not strip the CRC due to bug ID 11451, where
1455 	 * the hardware mishandles minimum-size packets.
1456 	 */
1457 	l2_len -= ETHERFCSL;
1458 
1459 	pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >>
1460 	    RCR_PKTBUFSZ_SHIFT);
1461 	pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) <<
1462 	    RCR_PKT_BUF_ADDR_SHIFT);
1463 
1464 	channel = rcr_p->rdc;
1465 
1466 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1467 	    "==> hxge_receive_packet: entryp $%p entry 0x%0llx "
1468 	    "pkt_buf_addr_pp $%p l2_len %d multi %d "
1469 	    "error_type 0x%x pkt_type 0x%x  "
1470 	    "pktbufsz_type %d ",
1471 	    rcr_desc_rd_head_p, rcr_entry, pkt_buf_addr_pp, l2_len,
1472 	    multi, error_type, pkt_type, pktbufsz_type));
1473 
1474 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1475 	    "==> hxge_receive_packet: entryp $%p entry 0x%0llx "
1476 	    "pkt_buf_addr_pp $%p l2_len %d multi %d "
1477 	    "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p,
1478 	    rcr_entry, pkt_buf_addr_pp, l2_len, multi, error_type, pkt_type));
1479 
1480 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1481 	    "==> (rbr) hxge_receive_packet: entry 0x%0llx "
1482 	    "full pkt_buf_addr_pp $%p l2_len %d",
1483 	    rcr_entry, pkt_buf_addr_pp, l2_len));
1484 
1485 	/* get the stats ptr */
1486 	rdc_stats = rcr_p->rdc_stats;
1487 
1488 	if (!l2_len) {
1489 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
1490 		    "<== hxge_receive_packet: failed: l2 length is 0."));
1491 		return;
1492 	}
1493 
1494 	/* shift 6 bits to get the full io address */
1495 	pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp <<
1496 	    RCR_PKT_BUF_ADDR_SHIFT_FULL);
1497 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1498 	    "==> (rbr) hxge_receive_packet: entry 0x%0llx "
1499 	    "full pkt_buf_addr_pp $%p l2_len %d",
1500 	    rcr_entry, pkt_buf_addr_pp, l2_len));
1501 
1502 	rx_rbr_p = rcr_p->rx_rbr_p;
1503 	rx_msg_ring_p = rx_rbr_p->rx_msg_ring;
1504 
1505 	if (first_entry) {
1506 		hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL :
1507 		    RXDMA_HDR_SIZE_DEFAULT);
1508 
1509 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
1510 		    "==> hxge_receive_packet: first entry 0x%016llx "
1511 		    "pkt_buf_addr_pp $%p l2_len %d hdr %d",
1512 		    rcr_entry, pkt_buf_addr_pp, l2_len, hdr_size));
1513 	}
1514 
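	/*
	 * Hold both the completion ring and buffer ring locks while the
	 * buffer is looked up and its usage state is updated; every exit
	 * path below drops them again.
	 */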
1515 	MUTEX_ENTER(&rcr_p->lock);
1516 	MUTEX_ENTER(&rx_rbr_p->lock);
1517 
1518 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
1519 	    "==> (rbr 1) hxge_receive_packet: entry 0x%0llx "
1520 	    "full pkt_buf_addr_pp $%p l2_len %d",
1521 	    rcr_entry, pkt_buf_addr_pp, l2_len));
1522 
1523 	/*
1524 	 * Packet buffer address in the completion entry points to the starting
1525 	 * buffer address (offset 0). Use the starting buffer address to locate
1526 	 * the corresponding kernel address.
1527 	 */
1528 	status = hxge_rxbuf_pp_to_vp(hxgep, rx_rbr_p,
1529 	    pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p,
1530 	    &buf_offset, &msg_index);
1531 
1532 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
1533 	    "==> (rbr 2) hxge_receive_packet: entry 0x%0llx "
1534 	    "full pkt_buf_addr_pp $%p l2_len %d",
1535 	    rcr_entry, pkt_buf_addr_pp, l2_len));
1536 
1537 	if (status != HXGE_OK) {
1538 		MUTEX_EXIT(&rx_rbr_p->lock);
1539 		MUTEX_EXIT(&rcr_p->lock);
1540 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
1541 		    "<== hxge_receive_packet: found vaddr failed %d", status));
1542 		return;
1543 	}
1544 
1545 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1546 	    "==> (rbr 3) hxge_receive_packet: entry 0x%0llx "
1547 	    "full pkt_buf_addr_pp $%p l2_len %d",
1548 	    rcr_entry, pkt_buf_addr_pp, l2_len));
1549 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1550 	    "==> (rbr 4 msgindex %d) hxge_receive_packet: entry 0x%0llx "
1551 	    "full pkt_buf_addr_pp $%p l2_len %d",
1552 	    msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
1553 
1554 	if (msg_index >= rx_rbr_p->tnblocks) {
1555 		MUTEX_EXIT(&rx_rbr_p->lock);
1556 		MUTEX_EXIT(&rcr_p->lock);
1557 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1558 		    "==> hxge_receive_packet: FATAL msg_index (%d) "
1559 		    "should be smaller than tnblocks (%d)\n",
1560 		    msg_index, rx_rbr_p->tnblocks));
1561 		return;
1562 	}
1563 
1564 	rx_msg_p = rx_msg_ring_p[msg_index];
1565 
1566 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1567 	    "==> (rbr 4 msgindex %d) hxge_receive_packet: entry 0x%0llx "
1568 	    "full pkt_buf_addr_pp $%p l2_len %d",
1569 	    msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
1570 
1571 	switch (pktbufsz_type) {
1572 	case RCR_PKTBUFSZ_0:
1573 		bsize = rx_rbr_p->pkt_buf_size0_bytes;
1574 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1575 		    "==> hxge_receive_packet: 0 buf %d", bsize));
1576 		break;
1577 	case RCR_PKTBUFSZ_1:
1578 		bsize = rx_rbr_p->pkt_buf_size1_bytes;
1579 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1580 		    "==> hxge_receive_packet: 1 buf %d", bsize));
1581 		break;
1582 	case RCR_PKTBUFSZ_2:
1583 		bsize = rx_rbr_p->pkt_buf_size2_bytes;
1584 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
1585 		    "==> hxge_receive_packet: 2 buf %d", bsize));
1586 		break;
1587 	case RCR_SINGLE_BLOCK:
1588 		bsize = rx_msg_p->block_size;
1589 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1590 		    "==> hxge_receive_packet: single %d", bsize));
1591 
1592 		break;
1593 	default:
1594 		MUTEX_EXIT(&rx_rbr_p->lock);
1595 		MUTEX_EXIT(&rcr_p->lock);
1596 		return;
1597 	}
1598 
1599 	DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma,
1600 	    (buf_offset + sw_offset_bytes), (hdr_size + l2_len),
1601 	    DDI_DMA_SYNC_FORCPU);
1602 
1603 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1604 	    "==> hxge_receive_packet: after first dump:usage count"));
1605 
1606 	if (rx_msg_p->cur_usage_cnt == 0) {
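		/*
		 * Decide whether this buffer will be loaned up to the stack
		 * or bcopy'd: once rbr_consumed reaches rbr_threshold_hi,
		 * always copy; between the low and high thresholds, copy
		 * only buffers of the configured size type or smaller;
		 * otherwise loan the buffer up.
		 */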
1607 		if (rx_rbr_p->rbr_use_bcopy) {
1608 			atomic_inc_32(&rx_rbr_p->rbr_consumed);
1609 			if (rx_rbr_p->rbr_consumed <
1610 			    rx_rbr_p->rbr_threshold_hi) {
1611 				if (rx_rbr_p->rbr_threshold_lo == 0 ||
1612 				    ((rx_rbr_p->rbr_consumed >=
1613 				    rx_rbr_p->rbr_threshold_lo) &&
1614 				    (rx_rbr_p->rbr_bufsize_type >=
1615 				    pktbufsz_type))) {
1616 					rx_msg_p->rx_use_bcopy = B_TRUE;
1617 				}
1618 			} else {
1619 				rx_msg_p->rx_use_bcopy = B_TRUE;
1620 			}
1621 		}
1622 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1623 		    "==> hxge_receive_packet: buf %d (new block) ", bsize));
1624 
1625 		rx_msg_p->pkt_buf_size_code = pktbufsz_type;
1626 		rx_msg_p->pkt_buf_size = bsize;
1627 		rx_msg_p->cur_usage_cnt = 1;
1628 		if (pktbufsz_type == RCR_SINGLE_BLOCK) {
1629 			HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1630 			    "==> hxge_receive_packet: buf %d (single block) ",
1631 			    bsize));
1632 			/*
1633 			 * Buffer can be reused once the free function is
1634 			 * called.
1635 			 */
1636 			rx_msg_p->max_usage_cnt = 1;
1637 			buffer_free = B_TRUE;
1638 		} else {
1639 			rx_msg_p->max_usage_cnt = rx_msg_p->block_size / bsize;
1640 			if (rx_msg_p->max_usage_cnt == 1) {
1641 				buffer_free = B_TRUE;
1642 			}
1643 		}
1644 	} else {
1645 		rx_msg_p->cur_usage_cnt++;
1646 		if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) {
1647 			buffer_free = B_TRUE;
1648 		}
1649 	}
1650 
1651 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
1652 	    "msgbuf index = %d l2len %d bytes usage %d max_usage %d ",
1653 	    msg_index, l2_len,
1654 	    rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt));
1655 
1656 	if (error_type) {
1657 		rdc_stats->ierrors++;
1658 		/* Update error stats */
1659 		rdc_stats->errlog.compl_err_type = error_type;
1660 		HXGE_FM_REPORT_ERROR(hxgep, NULL, HXGE_FM_EREPORT_RDMC_RCR_ERR);
1661 
1662 		if (error_type & RCR_CTRL_FIFO_DED) {
1663 			rdc_stats->ctrl_fifo_ecc_err++;
1664 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1665 			    " hxge_receive_packet: "
1666 			    " channel %d RCR ctrl_fifo_ded error", channel));
1667 		} else if (error_type & RCR_DATA_FIFO_DED) {
1668 			rdc_stats->data_fifo_ecc_err++;
1669 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1670 			    " hxge_receive_packet: channel %d"
1671 			    " RCR data_fifo_ded error", channel));
1672 		}
1673 
1674 		/*
1675 		 * Update and repost buffer block if max usage count is
1676 		 * reached.
1677 		 */
1678 		if (error_send_up == B_FALSE) {
1679 			atomic_inc_32(&rx_msg_p->ref_cnt);
1680 			atomic_inc_32(&hxge_mblks_pending);
1681 			if (buffer_free == B_TRUE) {
1682 				rx_msg_p->free = B_TRUE;
1683 			}
1684 
1685 			MUTEX_EXIT(&rx_rbr_p->lock);
1686 			MUTEX_EXIT(&rcr_p->lock);
1687 			hxge_freeb(rx_msg_p);
1688 			return;
1689 		}
1690 	}
1691 
1692 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1693 	    "==> hxge_receive_packet: DMA sync second "));
1694 
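	/*
	 * bytes_read carries the payload already consumed for this packet
	 * across completion entries; skip_len covers the software offset
	 * plus the receive header that precedes the payload in the first
	 * buffer of each packet.
	 */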
1695 	bytes_read = rcr_p->rcvd_pkt_bytes;
1696 	skip_len = sw_offset_bytes + hdr_size;
1697 	if (!rx_msg_p->rx_use_bcopy) {
1698 		/*
1699 		 * For loaned-up buffers, the driver reference count is
1700 		 * incremented first and the free state is set afterwards.
1701 		 */
1702 		if ((nmp = hxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) {
1703 			if (first_entry) {
1704 				nmp->b_rptr = &nmp->b_rptr[skip_len];
1705 				if (l2_len < bsize - skip_len) {
1706 					nmp->b_wptr = &nmp->b_rptr[l2_len];
1707 				} else {
1708 					nmp->b_wptr = &nmp->b_rptr[bsize
1709 					    - skip_len];
1710 				}
1711 			} else {
1712 				if (l2_len - bytes_read < bsize) {
1713 					nmp->b_wptr =
1714 					    &nmp->b_rptr[l2_len - bytes_read];
1715 				} else {
1716 					nmp->b_wptr = &nmp->b_rptr[bsize];
1717 				}
1718 			}
1719 		}
1720 	} else {
1721 		if (first_entry) {
1722 			nmp = hxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len,
1723 			    l2_len < bsize - skip_len ?
1724 			    l2_len : bsize - skip_len);
1725 		} else {
1726 			nmp = hxge_dupb_bcopy(rx_msg_p, buf_offset,
1727 			    l2_len - bytes_read < bsize ?
1728 			    l2_len - bytes_read : bsize);
1729 		}
1730 	}
1731 
1732 	if (nmp != NULL) {
1733 		if (first_entry)
1734 			bytes_read  = nmp->b_wptr - nmp->b_rptr;
1735 		else
1736 			bytes_read += nmp->b_wptr - nmp->b_rptr;
1737 
1738 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
1739 		    "==> hxge_receive_packet after dupb: "
1740 		    "rbr consumed %d "
1741 		    "pktbufsz_type %d "
1742 		    "nmp $%p rptr $%p wptr $%p "
1743 		    "buf_offset %d bsize %d l2_len %d skip_len %d",
1744 		    rx_rbr_p->rbr_consumed,
1745 		    pktbufsz_type,
1746 		    nmp, nmp->b_rptr, nmp->b_wptr,
1747 		    buf_offset, bsize, l2_len, skip_len));
1748 	} else {
1749 		cmn_err(CE_WARN, "!hxge_receive_packet: update stats (error)");
1750 
1751 		atomic_inc_32(&rx_msg_p->ref_cnt);
1752 		atomic_inc_32(&hxge_mblks_pending);
1753 		if (buffer_free == B_TRUE) {
1754 			rx_msg_p->free = B_TRUE;
1755 		}
1756 
1757 		MUTEX_EXIT(&rx_rbr_p->lock);
1758 		MUTEX_EXIT(&rcr_p->lock);
1759 		hxge_freeb(rx_msg_p);
1760 		return;
1761 	}
1762 
1763 	if (buffer_free == B_TRUE) {
1764 		rx_msg_p->free = B_TRUE;
1765 	}
1766 
1767 	/*
1768 	 * ERROR, FRAG and PKT_TYPE are only reported in the first entry. If a
1769 	 * packet is not fragmented and no error bit is set, then L4 checksum
1770 	 * is OK.
1771 	 */
1772 	is_valid = (nmp != NULL);
1773 	if (first_entry) {
1774 		rdc_stats->ipackets++; /* count only 1st seg for jumbo */
1775 		rdc_stats->ibytes += skip_len + l2_len < bsize ?
1776 		    l2_len : bsize;
1777 	} else {
1778 		rdc_stats->ibytes += l2_len - bytes_read < bsize ?
1779 		    l2_len - bytes_read : bsize;
1780 	}
1781 
1782 	rcr_p->rcvd_pkt_bytes = bytes_read;
1783 
1784 	MUTEX_EXIT(&rx_rbr_p->lock);
1785 	MUTEX_EXIT(&rcr_p->lock);
1786 
1787 	if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) {
1788 		atomic_inc_32(&rx_msg_p->ref_cnt);
1789 		atomic_inc_32(&hxge_mblks_pending);
1790 		hxge_freeb(rx_msg_p);
1791 	}
1792 
1793 	if (is_valid) {
1794 		nmp->b_cont = NULL;
1795 		if (first_entry) {
1796 			*mp = nmp;
1797 			*mp_cont = NULL;
1798 		} else {
1799 			*mp_cont = nmp;
1800 		}
1801 	}
1802 
1803 	/*
1804 	 * Update stats and hardware checksumming.
1805 	 */
1806 	if (is_valid && !multi) {
1807 
1808 		is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP ||
1809 		    pkt_type == RCR_PKT_IS_UDP) ? B_TRUE : B_FALSE);
1810 
1811 		HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_receive_packet: "
1812 		    "is_valid 0x%x multi %d pkt %d d error %d",
1813 		    is_valid, multi, is_tcp_udp, error_type));
1814 
1815 		if (is_tcp_udp && !error_type) {
1816 			(void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0,
1817 			    HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0);
1818 
1819 			HXGE_DEBUG_MSG((hxgep, RX_CTL,
1820 			    "==> hxge_receive_packet: Full tcp/udp cksum "
1821 			    "is_valid 0x%x multi %d pkt %d "
1822 			    "error %d",
1823 			    is_valid, multi, is_tcp_udp, error_type));
1824 		}
1825 	}
1826 
1827 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1828 	    "==> hxge_receive_packet: *mp 0x%016llx", *mp));
1829 
1830 	*multi_p = (multi == RCR_MULTI_MASK);
1831 
1832 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_receive_packet: "
1833 	    "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx",
1834 	    *multi_p, nmp, *mp, *mp_cont));
1835 }
1836 
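/*
 * hxge_rx_err_evnts() examines the RDC control/status word for a channel,
 * writes back the write-1-to-clear bits to acknowledge them, bumps the
 * per-channel error statistics and, for conditions treated as fatal,
 * attempts a channel recovery via hxge_rxdma_fatal_err_recover().
 */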
1837 /*ARGSUSED*/
1838 static hxge_status_t
1839 hxge_rx_err_evnts(p_hxge_t hxgep, uint_t index, p_hxge_ldv_t ldvp,
1840     rdc_stat_t cs)
1841 {
1842 	p_hxge_rx_ring_stats_t	rdc_stats;
1843 	hpi_handle_t		handle;
1844 	boolean_t		rxchan_fatal = B_FALSE;
1845 	uint8_t			channel;
1846 	hxge_status_t		status = HXGE_OK;
1847 	uint64_t		cs_val;
1848 
1849 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_rx_err_evnts"));
1850 
1851 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
1852 	channel = ldvp->channel;
1853 
1854 	/* Clear the interrupts */
1855 	cs_val = cs.value & RDC_STAT_WR1C;
1856 	RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs_val);
1857 
1858 	rdc_stats = &hxgep->statsp->rdc_stats[ldvp->vdma_index];
1859 
1860 	if (cs.bits.rbr_cpl_to) {
1861 		rdc_stats->rbr_tmout++;
1862 		HXGE_FM_REPORT_ERROR(hxgep, channel,
1863 		    HXGE_FM_EREPORT_RDMC_RBR_CPL_TO);
1864 		rxchan_fatal = B_TRUE;
1865 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1866 		    "==> hxge_rx_err_evnts(channel %d): "
1867 		    "fatal error: rx_rbr_timeout", channel));
1868 	}
1869 
1870 	if ((cs.bits.rcr_shadow_par_err) || (cs.bits.rbr_prefetch_par_err)) {
1871 		(void) hpi_rxdma_ring_perr_stat_get(handle,
1872 		    &rdc_stats->errlog.pre_par, &rdc_stats->errlog.sha_par);
1873 	}
1874 
1875 	if (cs.bits.rcr_shadow_par_err) {
1876 		rdc_stats->rcr_sha_par++;
1877 		HXGE_FM_REPORT_ERROR(hxgep, channel,
1878 		    HXGE_FM_EREPORT_RDMC_RCR_SHA_PAR);
1879 		rxchan_fatal = B_TRUE;
1880 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1881 		    "==> hxge_rx_err_evnts(channel %d): "
1882 		    "fatal error: rcr_shadow_par_err", channel));
1883 	}
1884 
1885 	if (cs.bits.rbr_prefetch_par_err) {
1886 		rdc_stats->rbr_pre_par++;
1887 		HXGE_FM_REPORT_ERROR(hxgep, channel,
1888 		    HXGE_FM_EREPORT_RDMC_RBR_PRE_PAR);
1889 		rxchan_fatal = B_TRUE;
1890 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1891 		    "==> hxge_rx_err_evnts(channel %d): "
1892 		    "fatal error: rbr_prefetch_par_err", channel));
1893 	}
1894 
1895 	if (cs.bits.rbr_pre_empty) {
1896 		rdc_stats->rbr_pre_empty++;
1897 		HXGE_FM_REPORT_ERROR(hxgep, channel,
1898 		    HXGE_FM_EREPORT_RDMC_RBR_PRE_EMPTY);
1899 		rxchan_fatal = B_TRUE;
1900 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1901 		    "==> hxge_rx_err_evnts(channel %d): "
1902 		    "fatal error: rbr_pre_empty", channel));
1903 	}
1904 
1905 	if (cs.bits.peu_resp_err) {
1906 		rdc_stats->peu_resp_err++;
1907 		HXGE_FM_REPORT_ERROR(hxgep, channel,
1908 		    HXGE_FM_EREPORT_RDMC_PEU_RESP_ERR);
1909 		rxchan_fatal = B_TRUE;
1910 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1911 		    "==> hxge_rx_err_evnts(channel %d): "
1912 		    "fatal error: peu_resp_err", channel));
1913 	}
1914 
1915 	if (cs.bits.rcr_thres) {
1916 		rdc_stats->rcr_thres++;
1917 		if (rdc_stats->rcr_thres == 1)
1918 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1919 			    "==> hxge_rx_err_evnts(channel %d): rcr_thres",
1920 			    channel));
1921 	}
1922 
1923 	if (cs.bits.rcr_to) {
1924 		rdc_stats->rcr_to++;
1925 		if (rdc_stats->rcr_to == 1)
1926 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1927 			    "==> hxge_rx_err_evnts(channel %d): rcr_to",
1928 			    channel));
1929 	}
1930 
1931 	if (cs.bits.rcr_shadow_full) {
1932 		rdc_stats->rcr_shadow_full++;
1933 		HXGE_FM_REPORT_ERROR(hxgep, channel,
1934 		    HXGE_FM_EREPORT_RDMC_RCR_SHA_FULL);
1935 		rxchan_fatal = B_TRUE;
1936 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1937 		    "==> hxge_rx_err_evnts(channel %d): "
1938 		    "fatal error: rcr_shadow_full", channel));
1939 	}
1940 
1941 	if (cs.bits.rcr_full) {
1942 		rdc_stats->rcrfull++;
1943 		HXGE_FM_REPORT_ERROR(hxgep, channel,
1944 		    HXGE_FM_EREPORT_RDMC_RCRFULL);
1945 		rxchan_fatal = B_TRUE;
1946 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1947 		    "==> hxge_rx_err_evnts(channel %d): "
1948 		    "fatal error: rcrfull error", channel));
1949 	}
1950 
1951 	if (cs.bits.rbr_empty) {
1952 		rdc_stats->rbr_empty++;
1953 		if (rdc_stats->rbr_empty == 1)
1954 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1955 			    "==> hxge_rx_err_evnts(channel %d): "
1956 			    "rbr empty error", channel));
1957 		/*
1958 		 * The DMA channel is disabled when the rbr_empty bit is
1959 		 * set, although the condition is not fatal. Re-enable the
1960 		 * DMA channel here to work around the hardware bug.
1961 		 */
1962 		(void) hpi_rxdma_cfg_rdc_enable(handle, channel);
1963 	}
1964 
1965 	if (cs.bits.rbr_full) {
1966 		rdc_stats->rbrfull++;
1967 		HXGE_FM_REPORT_ERROR(hxgep, channel,
1968 		    HXGE_FM_EREPORT_RDMC_RBRFULL);
1969 		rxchan_fatal = B_TRUE;
1970 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1971 		    "==> hxge_rx_err_evnts(channel %d): "
1972 		    "fatal error: rbr_full error", channel));
1973 	}
1974 
1975 	if (rxchan_fatal) {
1976 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1977 		    " hxge_rx_err_evnts: fatal error on Channel #%d\n",
1978 		    channel));
1979 		status = hxge_rxdma_fatal_err_recover(hxgep, channel);
1980 		if (status == HXGE_OK) {
1981 			FM_SERVICE_RESTORED(hxgep);
1982 		}
1983 	}
1984 	HXGE_DEBUG_MSG((hxgep, RX2_CTL, "<== hxge_rx_err_evnts"));
1985 
1986 	return (status);
1987 }
1988 
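/*
 * hxge_map_rxdma() allocates the RBR, RCR and mailbox bookkeeping arrays
 * and maps every RXDMA channel found in the receive buffer pool.  On a
 * per-channel failure, the channels mapped so far are unmapped and the
 * bookkeeping arrays are freed again.
 */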
1989 static hxge_status_t
1990 hxge_map_rxdma(p_hxge_t hxgep)
1991 {
1992 	int			i, ndmas;
1993 	uint16_t		channel;
1994 	p_rx_rbr_rings_t	rx_rbr_rings;
1995 	p_rx_rbr_ring_t		*rbr_rings;
1996 	p_rx_rcr_rings_t	rx_rcr_rings;
1997 	p_rx_rcr_ring_t		*rcr_rings;
1998 	p_rx_mbox_areas_t	rx_mbox_areas_p;
1999 	p_rx_mbox_t		*rx_mbox_p;
2000 	p_hxge_dma_pool_t	dma_buf_poolp;
2001 	p_hxge_dma_pool_t	dma_cntl_poolp;
2002 	p_hxge_dma_common_t	*dma_buf_p;
2003 	p_hxge_dma_common_t	*dma_cntl_p;
2004 	uint32_t		*num_chunks;
2005 	hxge_status_t		status = HXGE_OK;
2006 
2007 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_map_rxdma"));
2008 
2009 	dma_buf_poolp = hxgep->rx_buf_pool_p;
2010 	dma_cntl_poolp = hxgep->rx_cntl_pool_p;
2011 
2012 	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
2013 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2014 		    "<== hxge_map_rxdma: buf not allocated"));
2015 		return (HXGE_ERROR);
2016 	}
2017 
2018 	ndmas = dma_buf_poolp->ndmas;
2019 	if (!ndmas) {
2020 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
2021 		    "<== hxge_map_rxdma: no dma allocated"));
2022 		return (HXGE_ERROR);
2023 	}
2024 
2025 	num_chunks = dma_buf_poolp->num_chunks;
2026 	dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
2027 	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
2028 	rx_rbr_rings = (p_rx_rbr_rings_t)
2029 	    KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP);
2030 	rbr_rings = (p_rx_rbr_ring_t *)KMEM_ZALLOC(
2031 	    sizeof (p_rx_rbr_ring_t) * ndmas, KM_SLEEP);
2032 
2033 	rx_rcr_rings = (p_rx_rcr_rings_t)
2034 	    KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP);
2035 	rcr_rings = (p_rx_rcr_ring_t *)KMEM_ZALLOC(
2036 	    sizeof (p_rx_rcr_ring_t) * ndmas, KM_SLEEP);
2037 
2038 	rx_mbox_areas_p = (p_rx_mbox_areas_t)
2039 	    KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP);
2040 	rx_mbox_p = (p_rx_mbox_t *)KMEM_ZALLOC(
2041 	    sizeof (p_rx_mbox_t) * ndmas, KM_SLEEP);
2042 
2043 	/*
2044 	 * Timeout should be set based on the system clock divider.
2045 	 * The following timeout value of 1 assumes that the
2046 	 * granularity (1000) is 3 microseconds running at 300MHz.
2047 	 */
2048 
2049 	hxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT;
2050 	hxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT;
2051 
2052 	/*
2053 	 * Map descriptors from the buffer pools for each DMA channel.
2054 	 */
2055 	for (i = 0; i < ndmas; i++) {
2056 		/*
2057 		 * Set up and prepare buffer blocks, descriptors and mailbox.
2058 		 */
2059 		channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
2060 		status = hxge_map_rxdma_channel(hxgep, channel,
2061 		    (p_hxge_dma_common_t *)&dma_buf_p[i],
2062 		    (p_rx_rbr_ring_t *)&rbr_rings[i],
2063 		    num_chunks[i], (p_hxge_dma_common_t *)&dma_cntl_p[i],
2064 		    (p_rx_rcr_ring_t *)&rcr_rings[i],
2065 		    (p_rx_mbox_t *)&rx_mbox_p[i]);
2066 		if (status != HXGE_OK) {
2067 			goto hxge_map_rxdma_fail1;
2068 		}
2069 		rbr_rings[i]->index = (uint16_t)i;
2070 		rcr_rings[i]->index = (uint16_t)i;
2071 		rcr_rings[i]->rdc_stats = &hxgep->statsp->rdc_stats[i];
2072 	}
2073 
2074 	rx_rbr_rings->ndmas = rx_rcr_rings->ndmas = ndmas;
2075 	rx_rbr_rings->rbr_rings = rbr_rings;
2076 	hxgep->rx_rbr_rings = rx_rbr_rings;
2077 	rx_rcr_rings->rcr_rings = rcr_rings;
2078 	hxgep->rx_rcr_rings = rx_rcr_rings;
2079 
2080 	rx_mbox_areas_p->rxmbox_areas = rx_mbox_p;
2081 	hxgep->rx_mbox_areas_p = rx_mbox_areas_p;
2082 
2083 	goto hxge_map_rxdma_exit;
2084 
2085 hxge_map_rxdma_fail1:
2086 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2087 	    "==> hxge_map_rxdma: unmap rbr,rcr (status 0x%x channel %d i %d)",
2088 	    status, channel, i));
2089 	i--;
2090 	for (; i >= 0; i--) {
2091 		channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
2092 		hxge_unmap_rxdma_channel(hxgep, channel,
2093 		    rbr_rings[i], rcr_rings[i], rx_mbox_p[i]);
2094 	}
2095 
2096 	KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas);
2097 	KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t));
2098 	KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas);
2099 	KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t));
2100 	KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas);
2101 	KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t));
2102 
2103 hxge_map_rxdma_exit:
2104 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2105 	    "<== hxge_map_rxdma: (status 0x%x channel %d)", status, channel));
2106 
2107 	return (status);
2108 }
2109 
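/*
 * hxge_unmap_rxdma() is the teardown counterpart of hxge_map_rxdma(): it
 * unmaps each channel's rings and mailbox and then frees the bookkeeping
 * arrays that were allocated at map time.
 */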
2110 static void
2111 hxge_unmap_rxdma(p_hxge_t hxgep)
2112 {
2113 	int			i, ndmas;
2114 	uint16_t		channel;
2115 	p_rx_rbr_rings_t	rx_rbr_rings;
2116 	p_rx_rbr_ring_t		*rbr_rings;
2117 	p_rx_rcr_rings_t	rx_rcr_rings;
2118 	p_rx_rcr_ring_t		*rcr_rings;
2119 	p_rx_mbox_areas_t	rx_mbox_areas_p;
2120 	p_rx_mbox_t		*rx_mbox_p;
2121 	p_hxge_dma_pool_t	dma_buf_poolp;
2122 	p_hxge_dma_pool_t	dma_cntl_poolp;
2123 	p_hxge_dma_common_t	*dma_buf_p;
2124 
2125 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_unmap_rxdma"));
2126 
2127 	dma_buf_poolp = hxgep->rx_buf_pool_p;
2128 	dma_cntl_poolp = hxgep->rx_cntl_pool_p;
2129 
2130 	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
2131 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2132 		    "<== hxge_unmap_rxdma: NULL buf pointers"));
2133 		return;
2134 	}
2135 
2136 	rx_rbr_rings = hxgep->rx_rbr_rings;
2137 	rx_rcr_rings = hxgep->rx_rcr_rings;
2138 	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
2139 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2140 		    "<== hxge_unmap_rxdma: NULL ring pointers"));
2141 		return;
2142 	}
2143 
2144 	ndmas = rx_rbr_rings->ndmas;
2145 	if (!ndmas) {
2146 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2147 		    "<== hxge_unmap_rxdma: no channel"));
2148 		return;
2149 	}
2150 
2151 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2152 	    "==> hxge_unmap_rxdma (ndmas %d)", ndmas));
2153 
2154 	rbr_rings = rx_rbr_rings->rbr_rings;
2155 	rcr_rings = rx_rcr_rings->rcr_rings;
2156 	rx_mbox_areas_p = hxgep->rx_mbox_areas_p;
2157 	rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
2158 	dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
2159 
2160 	for (i = 0; i < ndmas; i++) {
2161 		channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
2162 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2163 		    "==> hxge_unmap_rxdma (ndmas %d) channel %d",
2164 		    ndmas, channel));
2165 		(void) hxge_unmap_rxdma_channel(hxgep, channel,
2166 		    (p_rx_rbr_ring_t)rbr_rings[i],
2167 		    (p_rx_rcr_ring_t)rcr_rings[i],
2168 		    (p_rx_mbox_t)rx_mbox_p[i]);
2169 	}
2170 
2171 	KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t));
2172 	KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas);
2173 	KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t));
2174 	KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas);
2175 	KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t));
2176 	KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas);
2177 
2178 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_unmap_rxdma"));
2179 }
2180 
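/*
 * hxge_map_rxdma_channel() maps a single channel in two steps: first the
 * receive buffer blocks (the RBR message ring), then the descriptor rings
 * and mailbox.  If the second step fails, the buffer ring is torn down
 * again before returning an error.
 */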
2181 hxge_status_t
2182 hxge_map_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
2183     p_hxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p,
2184     uint32_t num_chunks, p_hxge_dma_common_t *dma_cntl_p,
2185     p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
2186 {
2187 	int status = HXGE_OK;
2188 
2189 	/*
2190 	 * Set up and prepare buffer blocks, descriptors and mailbox.
2191 	 */
2192 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2193 	    "==> hxge_map_rxdma_channel (channel %d)", channel));
2194 
2195 	/*
2196 	 * Receive buffer blocks
2197 	 */
2198 	status = hxge_map_rxdma_channel_buf_ring(hxgep, channel,
2199 	    dma_buf_p, rbr_p, num_chunks);
2200 	if (status != HXGE_OK) {
2201 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2202 		    "==> hxge_map_rxdma_channel (channel %d): "
2203 		    "map buffer failed 0x%x", channel, status));
2204 		goto hxge_map_rxdma_channel_exit;
2205 	}
2206 
2207 	/*
2208 	 * Receive block ring, completion ring and mailbox.
2209 	 */
2210 	status = hxge_map_rxdma_channel_cfg_ring(hxgep, channel,
2211 	    dma_cntl_p, rbr_p, rcr_p, rx_mbox_p);
2212 	if (status != HXGE_OK) {
2213 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2214 		    "==> hxge_map_rxdma_channel (channel %d): "
2215 		    "map config failed 0x%x", channel, status));
2216 		goto hxge_map_rxdma_channel_fail2;
2217 	}
2218 	goto hxge_map_rxdma_channel_exit;
2219 
2220 hxge_map_rxdma_channel_fail3:
2221 	/* Free rbr, rcr */
2222 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2223 	    "==> hxge_map_rxdma_channel: free rbr/rcr (status 0x%x channel %d)",
2224 	    status, channel));
2225 	hxge_unmap_rxdma_channel_cfg_ring(hxgep, *rcr_p, *rx_mbox_p);
2226 
2227 hxge_map_rxdma_channel_fail2:
2228 	/* Free buffer blocks */
2229 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2230 	    "==> hxge_map_rxdma_channel: free rx buffers"
2231 	    "(hxgep 0x%x status 0x%x channel %d)",
2232 	    hxgep, status, channel));
2233 	hxge_unmap_rxdma_channel_buf_ring(hxgep, *rbr_p);
2234 
2235 	status = HXGE_ERROR;
2236 
2237 hxge_map_rxdma_channel_exit:
2238 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2239 	    "<== hxge_map_rxdma_channel: (hxgep 0x%x status 0x%x channel %d)",
2240 	    hxgep, status, channel));
2241 
2242 	return (status);
2243 }
2244 
2245 /*ARGSUSED*/
2246 static void
2247 hxge_unmap_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
2248     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
2249 {
2250 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2251 	    "==> hxge_unmap_rxdma_channel (channel %d)", channel));
2252 
2253 	/*
2254 	 * unmap receive block ring, completion ring and mailbox.
2255 	 */
2256 	(void) hxge_unmap_rxdma_channel_cfg_ring(hxgep, rcr_p, rx_mbox_p);
2257 
2258 	/* unmap buffer blocks */
2259 	(void) hxge_unmap_rxdma_channel_buf_ring(hxgep, rbr_p);
2260 
2261 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_unmap_rxdma_channel"));
2262 }
2263 
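/*
 * hxge_map_rxdma_channel_cfg_ring() carves the control DMA area into the
 * RBR descriptor ring, the RCR completion ring and the mailbox, seeds the
 * RBR with the block addresses of the pre-allocated receive buffers, and
 * pre-computes the register images (RBR/RCR cfga/cfgb, kick, mailbox
 * cfg1/cfg2) that are used when the channel is later started.
 */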
2264 /*ARGSUSED*/
2265 static hxge_status_t
2266 hxge_map_rxdma_channel_cfg_ring(p_hxge_t hxgep, uint16_t dma_channel,
2267     p_hxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p,
2268     p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
2269 {
2270 	p_rx_rbr_ring_t 	rbrp;
2271 	p_rx_rcr_ring_t 	rcrp;
2272 	p_rx_mbox_t 		mboxp;
2273 	p_hxge_dma_common_t 	cntl_dmap;
2274 	p_hxge_dma_common_t 	dmap;
2275 	p_rx_msg_t 		*rx_msg_ring;
2276 	p_rx_msg_t 		rx_msg_p;
2277 	rdc_rbr_cfg_a_t		*rcfga_p;
2278 	rdc_rbr_cfg_b_t		*rcfgb_p;
2279 	rdc_rcr_cfg_a_t		*cfga_p;
2280 	rdc_rcr_cfg_b_t		*cfgb_p;
2281 	rdc_rx_cfg1_t		*cfig1_p;
2282 	rdc_rx_cfg2_t		*cfig2_p;
2283 	rdc_rbr_kick_t		*kick_p;
2284 	uint32_t		dmaaddrp;
2285 	uint32_t		*rbr_vaddrp;
2286 	uint32_t		bkaddr;
2287 	hxge_status_t		status = HXGE_OK;
2288 	int			i;
2289 	uint32_t 		hxge_port_rcr_size;
2290 
2291 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2292 	    "==> hxge_map_rxdma_channel_cfg_ring"));
2293 
2294 	cntl_dmap = *dma_cntl_p;
2295 
2296 	/* Map in the receive block ring */
2297 	rbrp = *rbr_p;
2298 	dmap = (p_hxge_dma_common_t)&rbrp->rbr_desc;
2299 	hxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4);
2300 
2301 	/*
2302 	 * Zero out buffer block ring descriptors.
2303 	 */
2304 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
2305 
2306 	rcfga_p = &(rbrp->rbr_cfga);
2307 	rcfgb_p = &(rbrp->rbr_cfgb);
2308 	kick_p = &(rbrp->rbr_kick);
2309 	rcfga_p->value = 0;
2310 	rcfgb_p->value = 0;
2311 	kick_p->value = 0;
2312 	rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress;
2313 	rcfga_p->value = (rbrp->rbr_addr &
2314 	    (RBR_CFIG_A_STDADDR_MASK | RBR_CFIG_A_STDADDR_BASE_MASK));
2315 	rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT);
2316 
2317 	/* XXXX: how to choose packet buffer sizes */
2318 	rcfgb_p->bits.bufsz0 = rbrp->pkt_buf_size0;
2319 	rcfgb_p->bits.vld0 = 1;
2320 	rcfgb_p->bits.bufsz1 = rbrp->pkt_buf_size1;
2321 	rcfgb_p->bits.vld1 = 1;
2322 	rcfgb_p->bits.bufsz2 = rbrp->pkt_buf_size2;
2323 	rcfgb_p->bits.vld2 = 1;
2324 	rcfgb_p->bits.bksize = hxgep->rx_bksize_code;
2325 
2326 	/*
2327 	 * For each buffer block, enter receive block address to the ring.
2328 	 */
2329 	rbr_vaddrp = (uint32_t *)dmap->kaddrp;
2330 	rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp;
2331 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2332 	    "==> hxge_map_rxdma_channel_cfg_ring: channel %d "
2333 	    "rbr_vaddrp $%p", dma_channel, rbr_vaddrp));
2334 
2335 	rx_msg_ring = rbrp->rx_msg_ring;
2336 	for (i = 0; i < rbrp->tnblocks; i++) {
2337 		rx_msg_p = rx_msg_ring[i];
2338 		rx_msg_p->hxgep = hxgep;
2339 		rx_msg_p->rx_rbr_p = rbrp;
2340 		bkaddr = (uint32_t)
2341 		    ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >>
2342 		    RBR_BKADDR_SHIFT));
2343 		rx_msg_p->free = B_FALSE;
2344 		rx_msg_p->max_usage_cnt = 0xbaddcafe;
2345 
2346 		*rbr_vaddrp++ = bkaddr;
2347 	}
2348 
2349 	kick_p->bits.bkadd = rbrp->rbb_max;
2350 	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
2351 
2352 	rbrp->rbr_rd_index = 0;
2353 
2354 	rbrp->rbr_consumed = 0;
2355 	rbrp->rbr_use_bcopy = B_TRUE;
2356 	rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0;
2357 
2358 	/*
2359 	 * Do bcopy on packets greater than bcopy size once the lo threshold is
2360 	 * reached. This lo threshold should be less than the hi threshold.
2361 	 *
2362 	 * Do bcopy on every packet once the hi threshold is reached.
2363 	 */
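	/*
	 * Illustration (constant values assumed, not taken from the hxge
	 * headers): with rbb_max = 4096, hxge_rx_threshold_hi set to
	 * HXGE_RX_COPY_4 (== 4) and HXGE_RX_BCOPY_SCALE == 8, the switch
	 * below yields rbr_threshold_hi = 4096 * 4 / 8 = 2048, so, per the
	 * comment above, every packet is bcopied once ring consumption
	 * reaches 2048 buffers.
	 */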
2364 	if (hxge_rx_threshold_lo >= hxge_rx_threshold_hi) {
2365 		/* default it to use hi */
2366 		hxge_rx_threshold_lo = hxge_rx_threshold_hi;
2367 	}
2368 	if (hxge_rx_buf_size_type > HXGE_RBR_TYPE2) {
2369 		hxge_rx_buf_size_type = HXGE_RBR_TYPE2;
2370 	}
2371 	rbrp->rbr_bufsize_type = hxge_rx_buf_size_type;
2372 
2373 	switch (hxge_rx_threshold_hi) {
2374 	default:
2375 	case HXGE_RX_COPY_NONE:
2376 		/* Do not do bcopy at all */
2377 		rbrp->rbr_use_bcopy = B_FALSE;
2378 		rbrp->rbr_threshold_hi = rbrp->rbb_max;
2379 		break;
2380 
2381 	case HXGE_RX_COPY_1:
2382 	case HXGE_RX_COPY_2:
2383 	case HXGE_RX_COPY_3:
2384 	case HXGE_RX_COPY_4:
2385 	case HXGE_RX_COPY_5:
2386 	case HXGE_RX_COPY_6:
2387 	case HXGE_RX_COPY_7:
2388 		rbrp->rbr_threshold_hi =
2389 		    rbrp->rbb_max * (hxge_rx_threshold_hi) /
2390 		    HXGE_RX_BCOPY_SCALE;
2391 		break;
2392 
2393 	case HXGE_RX_COPY_ALL:
2394 		rbrp->rbr_threshold_hi = 0;
2395 		break;
2396 	}
2397 
2398 	switch (hxge_rx_threshold_lo) {
2399 	default:
2400 	case HXGE_RX_COPY_NONE:
2401 		/* Do not do bcopy at all */
2402 		if (rbrp->rbr_use_bcopy) {
2403 			rbrp->rbr_use_bcopy = B_FALSE;
2404 		}
2405 		rbrp->rbr_threshold_lo = rbrp->rbb_max;
2406 		break;
2407 
2408 	case HXGE_RX_COPY_1:
2409 	case HXGE_RX_COPY_2:
2410 	case HXGE_RX_COPY_3:
2411 	case HXGE_RX_COPY_4:
2412 	case HXGE_RX_COPY_5:
2413 	case HXGE_RX_COPY_6:
2414 	case HXGE_RX_COPY_7:
2415 		rbrp->rbr_threshold_lo =
2416 		    rbrp->rbb_max * (hxge_rx_threshold_lo) /
2417 		    HXGE_RX_BCOPY_SCALE;
2418 		break;
2419 
2420 	case HXGE_RX_COPY_ALL:
2421 		rbrp->rbr_threshold_lo = 0;
2422 		break;
2423 	}
2424 
2425 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
2426 	    "hxge_map_rxdma_channel_cfg_ring: channel %d rbb_max %d "
2427 	    "rbrp->rbr_bufsize_type %d rbb_threshold_hi %d "
2428 	    "rbb_threshold_lo %d",
2429 	    dma_channel, rbrp->rbb_max, rbrp->rbr_bufsize_type,
2430 	    rbrp->rbr_threshold_hi, rbrp->rbr_threshold_lo));
2431 
2432 	/* Map in the receive completion ring */
2433 	rcrp = (p_rx_rcr_ring_t)KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP);
2434 	rcrp->rdc = dma_channel;
2435 
2436 	hxge_port_rcr_size = hxgep->hxge_port_rcr_size;
2437 	rcrp->comp_size = hxge_port_rcr_size;
2438 	rcrp->comp_wrap_mask = hxge_port_rcr_size - 1;
2439 
2440 	rcrp->max_receive_pkts = hxge_max_rx_pkts;
2441 
2442 	dmap = (p_hxge_dma_common_t)&rcrp->rcr_desc;
2443 	hxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size,
2444 	    sizeof (rcr_entry_t));
2445 	rcrp->comp_rd_index = 0;
2446 	rcrp->comp_wt_index = 0;
2447 	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
2448 	    (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
2449 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
2450 	    (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
2451 	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
2452 	    (hxge_port_rcr_size - 1);
2453 	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
2454 	    (hxge_port_rcr_size - 1);
2455 
2456 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2457 	    "==> hxge_map_rxdma_channel_cfg_ring: channel %d "
2458 	    "rbr_vaddrp $%p rcr_desc_rd_head_p $%p "
2459 	    "rcr_desc_rd_head_pp $%p rcr_desc_rd_last_p $%p "
2460 	    "rcr_desc_rd_last_pp $%p ",
2461 	    dma_channel, rbr_vaddrp, rcrp->rcr_desc_rd_head_p,
2462 	    rcrp->rcr_desc_rd_head_pp, rcrp->rcr_desc_last_p,
2463 	    rcrp->rcr_desc_last_pp));
2464 
2465 	/*
2466 	 * Zero out the receive completion ring descriptors.
2467 	 */
2468 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
2469 	rcrp->intr_timeout = hxgep->intr_timeout;
2470 	rcrp->intr_threshold = hxgep->intr_threshold;
2471 	rcrp->full_hdr_flag = B_FALSE;
2472 	rcrp->sw_priv_hdr_len = 0;
2473 
2474 	cfga_p = &(rcrp->rcr_cfga);
2475 	cfgb_p = &(rcrp->rcr_cfgb);
2476 	cfga_p->value = 0;
2477 	cfgb_p->value = 0;
2478 	rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress;
2479 
2480 	cfga_p->value = (rcrp->rcr_addr &
2481 	    (RCRCFIG_A_STADDR_MASK | RCRCFIG_A_STADDR_BASE_MASK));
2482 
2483 	cfga_p->value |= ((uint64_t)rcrp->comp_size << RCRCFIG_A_LEN_SHIF);
2484 
2485 	/*
2486 	 * Timeout should be set based on the system clock divider. The
2487 	 * following timeout value of 1 assumes that the granularity (1000) is
2488 	 * 3 microseconds running at 300MHz.
2489 	 */
2490 	cfgb_p->bits.pthres = rcrp->intr_threshold;
2491 	cfgb_p->bits.timeout = rcrp->intr_timeout;
2492 	cfgb_p->bits.entout = 1;
2493 
2494 	/* Map in the mailbox */
2495 	mboxp = (p_rx_mbox_t)KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP);
2496 	dmap = (p_hxge_dma_common_t)&mboxp->rx_mbox;
2497 	hxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t));
2498 	cfig1_p = (rdc_rx_cfg1_t *)&mboxp->rx_cfg1;
2499 	cfig2_p = (rdc_rx_cfg2_t *)&mboxp->rx_cfg2;
2500 	cfig1_p->value = cfig2_p->value = 0;
2501 
2502 	mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress;
2503 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2504 	    "==> hxge_map_rxdma_channel_cfg_ring: "
2505 	    "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx",
2506 	    dma_channel, cfig1_p->value, cfig2_p->value,
2507 	    mboxp->mbox_addr));
2508 
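	/*
	 * Split the 64-bit mailbox DMA address across the two mailbox
	 * configuration registers: the 12 bits above bit 31 go into
	 * mbaddr_h (cfg1), and the masked low-order bits, shifted by
	 * RXDMA_CFIG2_MBADDR_L_SHIFT, go into mbaddr_l (cfg2).
	 */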
2509 	dmaaddrp = (uint32_t)((dmap->dma_cookie.dmac_laddress >> 32) & 0xfff);
2510 	cfig1_p->bits.mbaddr_h = dmaaddrp;
2511 
2512 	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff);
2513 	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress &
2514 	    RXDMA_CFIG2_MBADDR_L_MASK);
2515 
2516 	cfig2_p->bits.mbaddr_l = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT);
2517 
2518 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2519 	    "==> hxge_map_rxdma_channel_cfg_ring: channel %d damaddrp $%p "
2520 	    "cfg1 0x%016llx cfig2 0x%016llx",
2521 	    dma_channel, dmaaddrp, cfig1_p->value, cfig2_p->value));
2522 
2523 	cfig2_p->bits.full_hdr = rcrp->full_hdr_flag;
2524 	cfig2_p->bits.offset = rcrp->sw_priv_hdr_len;
2525 
2526 	rbrp->rx_rcr_p = rcrp;
2527 	rcrp->rx_rbr_p = rbrp;
2528 	*rcr_p = rcrp;
2529 	*rx_mbox_p = mboxp;
2530 
2531 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2532 	    "<== hxge_map_rxdma_channel_cfg_ring status 0x%08x", status));
2533 	return (status);
2534 }
2535 
2536 /*ARGSUSED*/
2537 static void
2538 hxge_unmap_rxdma_channel_cfg_ring(p_hxge_t hxgep,
2539     p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
2540 {
2541 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2542 	    "==> hxge_unmap_rxdma_channel_cfg_ring: channel %d", rcr_p->rdc));
2543 
2544 	KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t));
2545 	KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t));
2546 
2547 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2548 	    "<== hxge_unmap_rxdma_channel_cfg_ring"));
2549 }
2550 
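/*
 * hxge_map_rxdma_channel_buf_ring() builds the receive buffer ring for one
 * channel: it walks the pre-allocated DMA chunks, allocates an rx_msg_t
 * (with its mblk) for every block, records the chunk layout in ring_info,
 * and selects the three RBR packet buffer sizes (2K or 4K for the largest,
 * depending on jumbo support).
 */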
2551 static hxge_status_t
2552 hxge_map_rxdma_channel_buf_ring(p_hxge_t hxgep, uint16_t channel,
2553     p_hxge_dma_common_t *dma_buf_p,
2554     p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks)
2555 {
2556 	p_rx_rbr_ring_t		rbrp;
2557 	p_hxge_dma_common_t	dma_bufp, tmp_bufp;
2558 	p_rx_msg_t		*rx_msg_ring;
2559 	p_rx_msg_t		rx_msg_p;
2560 	p_mblk_t		mblk_p;
2561 
2562 	rxring_info_t *ring_info;
2563 	hxge_status_t status = HXGE_OK;
2564 	int i, j, index;
2565 	uint32_t size, bsize, nblocks, nmsgs;
2566 
2567 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2568 	    "==> hxge_map_rxdma_channel_buf_ring: channel %d", channel));
2569 
2570 	dma_bufp = tmp_bufp = *dma_buf_p;
2571 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2572 	    " hxge_map_rxdma_channel_buf_ring: channel %d to map %d "
2573 	    "chunks bufp 0x%016llx", channel, num_chunks, dma_bufp));
2574 
2575 	nmsgs = 0;
2576 	for (i = 0; i < num_chunks; i++, tmp_bufp++) {
2577 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2578 		    "==> hxge_map_rxdma_channel_buf_ring: channel %d "
2579 		    "bufp 0x%016llx nblocks %d nmsgs %d",
2580 		    channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
2581 		nmsgs += tmp_bufp->nblocks;
2582 	}
2583 	if (!nmsgs) {
2584 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2585 		    "<== hxge_map_rxdma_channel_buf_ring: channel %d "
2586 		    "no msg blocks", channel));
2587 		status = HXGE_ERROR;
2588 		goto hxge_map_rxdma_channel_buf_ring_exit;
2589 	}
2590 	rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (rx_rbr_ring_t), KM_SLEEP);
2591 
2592 	size = nmsgs * sizeof (p_rx_msg_t);
2593 	rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
2594 	ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t),
2595 	    KM_SLEEP);
2596 
2597 	MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER,
2598 	    (void *) hxgep->interrupt_cookie);
2599 	MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER,
2600 	    (void *) hxgep->interrupt_cookie);
2601 	rbrp->rdc = channel;
2602 	rbrp->num_blocks = num_chunks;
2603 	rbrp->tnblocks = nmsgs;
2604 	rbrp->rbb_max = nmsgs;
2605 	rbrp->rbr_max_size = nmsgs;
2606 	rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1);
2607 
2608 	rbrp->pages_to_post = 0;
2609 	rbrp->pages_to_skip = 20;
2610 	rbrp->pages_to_post_threshold = rbrp->rbb_max - rbrp->pages_to_skip / 2;
2611 
2612 	/*
2613 	 * Buffer sizes suggested by the NIU architect. The code below uses
2614 	 * 256 bytes, 1K and 2K (or 4K when jumbo frames are accepted).
2614 	 */
2615 
2616 	rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B;
2617 	rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES;
2618 	rbrp->hpi_pkt_buf_size0 = SIZE_256B;
2619 
2620 	rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K;
2621 	rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES;
2622 	rbrp->hpi_pkt_buf_size1 = SIZE_1KB;
2623 
2624 	rbrp->block_size = hxgep->rx_default_block_size;
2625 
2626 	if (!hxge_jumbo_enable && !hxgep->param_arr[param_accept_jumbo].value) {
2627 		rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K;
2628 		rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES;
2629 		rbrp->hpi_pkt_buf_size2 = SIZE_2KB;
2630 	} else {
2631 		if (rbrp->block_size >= 0x2000) {
2632 			HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2633 			    "<== hxge_map_rxdma_channel_buf_ring: channel %d "
2634 			    "no msg blocks", channel));
2635 			status = HXGE_ERROR;
2636 			goto hxge_map_rxdma_channel_buf_ring_fail1;
2637 		} else {
2638 			rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K;
2639 			rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES;
2640 			rbrp->hpi_pkt_buf_size2 = SIZE_4KB;
2641 		}
2642 	}
2643 
2644 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2645 	    "==> hxge_map_rxdma_channel_buf_ring: channel %d "
2646 	    "actual rbr max %d rbb_max %d nmsgs %d "
2647 	    "rbrp->block_size %d default_block_size %d "
2648 	    "(config hxge_rbr_size %d hxge_rbr_spare_size %d)",
2649 	    channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs,
2650 	    rbrp->block_size, hxgep->rx_default_block_size,
2651 	    hxge_rbr_size, hxge_rbr_spare_size));
2652 
2653 	/*
2654 	 * Map in buffers from the buffer pool.
2655 	 * Note that num_blocks is the same as num_chunks. On SPARC there is
2656 	 * likely only one chunk; on x86 there will be many chunks.
2657 	 * Loop over chunks.
2658 	 */
2659 	index = 0;
2660 	for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) {
2661 		bsize = dma_bufp->block_size;
2662 		nblocks = dma_bufp->nblocks;
2663 		ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp;
2664 		ring_info->buffer[i].buf_index = i;
2665 		ring_info->buffer[i].buf_size = dma_bufp->alength;
2666 		ring_info->buffer[i].start_index = index;
2667 		ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp;
2668 
2669 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2670 		    " hxge_map_rxdma_channel_buf_ring: map channel %d "
2671 		    "chunk %d nblocks %d chunk_size %x block_size 0x%x "
2672 		    "dma_bufp $%p dvma_addr $%p", channel, i,
2673 		    dma_bufp->nblocks,
2674 		    ring_info->buffer[i].buf_size, bsize, dma_bufp,
2675 		    ring_info->buffer[i].dvma_addr));
2676 
2677 		/* loop over blocks within a chunk */
2678 		for (j = 0; j < nblocks; j++) {
2679 			if ((rx_msg_p = hxge_allocb(bsize, BPRI_LO,
2680 			    dma_bufp)) == NULL) {
2681 				HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2682 				    "allocb failed (index %d i %d j %d)",
2683 				    index, i, j));
2684 				goto hxge_map_rxdma_channel_buf_ring_fail1;
2685 			}
2686 			rx_msg_ring[index] = rx_msg_p;
2687 			rx_msg_p->block_index = index;
2688 			rx_msg_p->shifted_addr = (uint32_t)
2689 			    ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >>
2690 			    RBR_BKADDR_SHIFT));
2691 			/*
2692 			 * Too much output
2693 			 * HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2694 			 *	"index %d j %d rx_msg_p $%p mblk %p",
2695 			 *	index, j, rx_msg_p, rx_msg_p->rx_mblk_p));
2696 			 */
2697 			mblk_p = rx_msg_p->rx_mblk_p;
2698 			mblk_p->b_wptr = mblk_p->b_rptr + bsize;
2699 
2700 			rbrp->rbr_ref_cnt++;
2701 			index++;
2702 			rx_msg_p->buf_dma.dma_channel = channel;
2703 		}
2704 	}
2705 	if (i < rbrp->num_blocks) {
2706 		goto hxge_map_rxdma_channel_buf_ring_fail1;
2707 	}
2708 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2709 	    "hxge_map_rxdma_channel_buf_ring: done buf init "
2710 	    "channel %d msg block entries %d", channel, index));
2711 	ring_info->block_size_mask = bsize - 1;
2712 	rbrp->rx_msg_ring = rx_msg_ring;
2713 	rbrp->dma_bufp = dma_buf_p;
2714 	rbrp->ring_info = ring_info;
2715 
2716 	status = hxge_rxbuf_index_info_init(hxgep, rbrp);
2717 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, " hxge_map_rxdma_channel_buf_ring: "
2718 	    "channel %d done buf info init", channel));
2719 
2720 	/*
2721 	 * Finally, permit hxge_freeb() to call hxge_post_page().
2722 	 */
2723 	rbrp->rbr_state = RBR_POSTING;
2724 
2725 	*rbr_p = rbrp;
2726 
2727 	goto hxge_map_rxdma_channel_buf_ring_exit;
2728 
2729 hxge_map_rxdma_channel_buf_ring_fail1:
2730 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2731 	    " hxge_map_rxdma_channel_buf_ring: failed channel (0x%x)",
2732 	    channel, status));
2733 
2734 	index--;
2735 	for (; index >= 0; index--) {
2736 		rx_msg_p = rx_msg_ring[index];
2737 		if (rx_msg_p != NULL) {
2738 			hxge_freeb(rx_msg_p);
2739 			rx_msg_ring[index] = NULL;
2740 		}
2741 	}
2742 
2743 hxge_map_rxdma_channel_buf_ring_fail:
2744 	MUTEX_DESTROY(&rbrp->post_lock);
2745 	MUTEX_DESTROY(&rbrp->lock);
2746 	KMEM_FREE(ring_info, sizeof (rxring_info_t));
2747 	KMEM_FREE(rx_msg_ring, size);
2748 	KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t));
2749 
2750 	status = HXGE_ERROR;
2751 
2752 hxge_map_rxdma_channel_buf_ring_exit:
2753 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2754 	    "<== hxge_map_rxdma_channel_buf_ring status 0x%08x", status));
2755 
2756 	return (status);
2757 }
2758 
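/*
 * hxge_unmap_rxdma_channel_buf_ring() frees every rx_msg_t in the ring and
 * the ring bookkeeping.  The rx_rbr_ring_t itself is freed here only when
 * no buffers are still loaned out (rbr_ref_cnt == 0); otherwise the ring is
 * marked RBR_UNMAPPED so that hxge_freeb() can free it when the last
 * outstanding buffer is returned.
 */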
2759 /*ARGSUSED*/
2760 static void
2761 hxge_unmap_rxdma_channel_buf_ring(p_hxge_t hxgep,
2762     p_rx_rbr_ring_t rbr_p)
2763 {
2764 	p_rx_msg_t	*rx_msg_ring;
2765 	p_rx_msg_t	rx_msg_p;
2766 	rxring_info_t	*ring_info;
2767 	int		i;
2768 	uint32_t	size;
2769 
2770 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2771 	    "==> hxge_unmap_rxdma_channel_buf_ring"));
2772 	if (rbr_p == NULL) {
2773 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
2774 		    "<== hxge_unmap_rxdma_channel_buf_ring: NULL rbrp"));
2775 		return;
2776 	}
2777 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2778 	    "==> hxge_unmap_rxdma_channel_buf_ring: channel %d", rbr_p->rdc));
2779 
2780 	rx_msg_ring = rbr_p->rx_msg_ring;
2781 	ring_info = rbr_p->ring_info;
2782 
2783 	if (rx_msg_ring == NULL || ring_info == NULL) {
2784 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2785 		    "<== hxge_unmap_rxdma_channel_buf_ring: "
2786 		    "rx_msg_ring $%p ring_info $%p", rx_msg_p, ring_info));
2787 		return;
2788 	}
2789 
2790 	size = rbr_p->tnblocks * sizeof (p_rx_msg_t);
2791 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2792 	    " hxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d "
2793 	    "tnblocks %d (max %d) size ptrs %d ", rbr_p->rdc, rbr_p->num_blocks,
2794 	    rbr_p->tnblocks, rbr_p->rbr_max_size, size));
2795 
2796 	for (i = 0; i < rbr_p->tnblocks; i++) {
2797 		rx_msg_p = rx_msg_ring[i];
2798 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2799 		    " hxge_unmap_rxdma_channel_buf_ring: "
2800 		    "rx_msg_p $%p", rx_msg_p));
2801 		if (rx_msg_p != NULL) {
2802 			hxge_freeb(rx_msg_p);
2803 			rx_msg_ring[i] = NULL;
2804 		}
2805 	}
2806 
2807 	/*
2808 	 * We may no longer use the <post_lock> mutex. By setting
2809 	 * <rbr_state> to anything but POSTING, we prevent
2810 	 * hxge_post_page() from accessing a dead mutex.
2811 	 */
2812 	rbr_p->rbr_state = RBR_UNMAPPING;
2813 	MUTEX_DESTROY(&rbr_p->post_lock);
2814 
2815 	MUTEX_DESTROY(&rbr_p->lock);
2816 	KMEM_FREE(ring_info, sizeof (rxring_info_t));
2817 	KMEM_FREE(rx_msg_ring, size);
2818 
2819 	if (rbr_p->rbr_ref_cnt == 0) {
2820 		/* This is the normal state of affairs. */
2821 		KMEM_FREE(rbr_p, sizeof (*rbr_p));
2822 	} else {
2823 		/*
2824 		 * Some of our buffers are still being used.
2825 		 * Therefore, tell hxge_freeb() this ring is
2826 		 * unmapped, so it may free <rbr_p> for us.
2827 		 */
2828 		rbr_p->rbr_state = RBR_UNMAPPED;
2829 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2830 		    "unmap_rxdma_buf_ring: %d %s outstanding.",
2831 		    rbr_p->rbr_ref_cnt,
2832 		    rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs"));
2833 	}
2834 
2835 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2836 	    "<== hxge_unmap_rxdma_channel_buf_ring"));
2837 }
2838 
2839 static hxge_status_t
2840 hxge_rxdma_hw_start_common(p_hxge_t hxgep)
2841 {
2842 	hxge_status_t status = HXGE_OK;
2843 
2844 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start_common"));
2845 
2846 	/*
2847 	 * Load the sharable parameters by writing to the function zero control
2848 	 * registers. These FZC registers should be initialized only once for
2849 	 * the entire chip.
2850 	 */
2851 	(void) hxge_init_fzc_rx_common(hxgep);
2852 
2853 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start_common"));
2854 
2855 	return (status);
2856 }
2857 
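/*
 * hxge_rxdma_hw_start() scrubs the RDC prefetch, shadow tail, control FIFO
 * and data FIFO command memories, resets the FIFO error status, unmasks the
 * FIFO error interrupts and then starts every mapped RXDMA channel.  On a
 * per-channel failure, the channels already started are stopped again.
 */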
2858 static hxge_status_t
2859 hxge_rxdma_hw_start(p_hxge_t hxgep)
2860 {
2861 	int			i, ndmas;
2862 	uint16_t		channel;
2863 	p_rx_rbr_rings_t	rx_rbr_rings;
2864 	p_rx_rbr_ring_t		*rbr_rings;
2865 	p_rx_rcr_rings_t	rx_rcr_rings;
2866 	p_rx_rcr_ring_t		*rcr_rings;
2867 	p_rx_mbox_areas_t	rx_mbox_areas_p;
2868 	p_rx_mbox_t		*rx_mbox_p;
2869 	hxge_status_t		status = HXGE_OK;
2870 
2871 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start"));
2872 
2873 	rx_rbr_rings = hxgep->rx_rbr_rings;
2874 	rx_rcr_rings = hxgep->rx_rcr_rings;
2875 	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
2876 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
2877 		    "<== hxge_rxdma_hw_start: NULL ring pointers"));
2878 		return (HXGE_ERROR);
2879 	}
2880 
2881 	ndmas = rx_rbr_rings->ndmas;
2882 	if (ndmas == 0) {
2883 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
2884 		    "<== hxge_rxdma_hw_start: no dma channel allocated"));
2885 		return (HXGE_ERROR);
2886 	}
2887 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2888 	    "==> hxge_rxdma_hw_start (ndmas %d)", ndmas));
2889 
2890 	/*
2891 	 * Scrub the RDC Rx DMA Prefetch Buffer Command.
2892 	 */
2893 	for (i = 0; i < 128; i++) {
2894 		HXGE_REG_WR64(hxgep->hpi_handle, RDC_PREF_CMD, i);
2895 	}
2896 
2897 	/*
2898 	 * Scrub Rx DMA Shadow Tail Command.
2899 	 */
2900 	for (i = 0; i < 64; i++) {
2901 		HXGE_REG_WR64(hxgep->hpi_handle, RDC_SHADOW_CMD, i);
2902 	}
2903 
2904 	/*
2905 	 * Scrub Rx DMA Control Fifo Command.
2906 	 */
2907 	for (i = 0; i < 512; i++) {
2908 		HXGE_REG_WR64(hxgep->hpi_handle, RDC_CTRL_FIFO_CMD, i);
2909 	}
2910 
2911 	/*
2912 	 * Scrub Rx DMA Data Fifo Command.
2913 	 */
2914 	for (i = 0; i < 1536; i++) {
2915 		HXGE_REG_WR64(hxgep->hpi_handle, RDC_DATA_FIFO_CMD, i);
2916 	}
2917 
2918 	/*
2919 	 * Reset the FIFO Error Stat.
2920 	 */
2921 	HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_STAT, 0xFF);
2922 
2923 	/* Set the error mask to receive interrupts */
2924 	HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_INT_MASK, 0x0);
2925 
2926 	rbr_rings = rx_rbr_rings->rbr_rings;
2927 	rcr_rings = rx_rcr_rings->rcr_rings;
2928 	rx_mbox_areas_p = hxgep->rx_mbox_areas_p;
2929 	if (rx_mbox_areas_p) {
2930 		rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
2931 	}
2932 
2933 	for (i = 0; i < ndmas; i++) {
2934 		channel = rbr_rings[i]->rdc;
2935 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2936 		    "==> hxge_rxdma_hw_start (ndmas %d) channel %d",
2937 		    ndmas, channel));
2938 		status = hxge_rxdma_start_channel(hxgep, channel,
2939 		    (p_rx_rbr_ring_t)rbr_rings[i],
2940 		    (p_rx_rcr_ring_t)rcr_rings[i],
2941 		    (p_rx_mbox_t)rx_mbox_p[i]);
2942 		if (status != HXGE_OK) {
2943 			goto hxge_rxdma_hw_start_fail1;
2944 		}
2945 	}
2946 
2947 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start: "
2948 	    "rx_rbr_rings 0x%016llx rings 0x%016llx",
2949 	    rx_rbr_rings, rx_rcr_rings));
2950 	goto hxge_rxdma_hw_start_exit;
2951 
2952 hxge_rxdma_hw_start_fail1:
2953 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2954 	    "==> hxge_rxdma_hw_start: disable "
2955 	    "(status 0x%x channel %d i %d)", status, channel, i));
2956 	for (; i >= 0; i--) {
2957 		channel = rbr_rings[i]->rdc;
2958 		(void) hxge_rxdma_stop_channel(hxgep, channel);
2959 	}
2960 
2961 hxge_rxdma_hw_start_exit:
2962 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2963 	    "==> hxge_rxdma_hw_start: (status 0x%x)", status));
2964 	return (status);
2965 }
2966 
2967 static void
2968 hxge_rxdma_hw_stop(p_hxge_t hxgep)
2969 {
2970 	int			i, ndmas;
2971 	uint16_t		channel;
2972 	p_rx_rbr_rings_t	rx_rbr_rings;
2973 	p_rx_rbr_ring_t		*rbr_rings;
2974 	p_rx_rcr_rings_t	rx_rcr_rings;
2975 
2976 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_stop"));
2977 
2978 	rx_rbr_rings = hxgep->rx_rbr_rings;
2979 	rx_rcr_rings = hxgep->rx_rcr_rings;
2980 
2981 	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
2982 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
2983 		    "<== hxge_rxdma_hw_stop: NULL ring pointers"));
2984 		return;
2985 	}
2986 
2987 	ndmas = rx_rbr_rings->ndmas;
2988 	if (!ndmas) {
2989 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
2990 		    "<== hxge_rxdma_hw_stop: no dma channel allocated"));
2991 		return;
2992 	}
2993 
2994 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2995 	    "==> hxge_rxdma_hw_stop (ndmas %d)", ndmas));
2996 
2997 	rbr_rings = rx_rbr_rings->rbr_rings;
2998 	for (i = 0; i < ndmas; i++) {
2999 		channel = rbr_rings[i]->rdc;
3000 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3001 		    "==> hxge_rxdma_hw_stop (ndmas %d) channel %d",
3002 		    ndmas, channel));
3003 		(void) hxge_rxdma_stop_channel(hxgep, channel);
3004 	}
3005 
3006 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_stop: "
3007 	    "rx_rbr_rings 0x%016llx rings 0x%016llx",
3008 	    rx_rbr_rings, rx_rcr_rings));
3009 
3010 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_rxdma_hw_stop"));
3011 }
3012 
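/*
 * hxge_rxdma_start_channel() resets one RXDMA channel, programs its FZC
 * (logical page) configuration, clears the interrupt event mask, loads the
 * descriptor rings and mailbox via hxge_enable_rxdma_channel(), and finally
 * arms the control/status register (mex, rcr_thres, rcr_to, rbr_empty).
 */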
3013 static hxge_status_t
3014 hxge_rxdma_start_channel(p_hxge_t hxgep, uint16_t channel,
3015     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
3016 {
3017 	hpi_handle_t		handle;
3018 	hpi_status_t		rs = HPI_SUCCESS;
3019 	rdc_stat_t		cs;
3020 	rdc_int_mask_t		ent_mask;
3021 	hxge_status_t		status = HXGE_OK;
3022 
3023 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel"));
3024 
3025 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
3026 
3027 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "hxge_rxdma_start_channel: "
3028 	    "hpi handle addr $%p acc $%p",
3029 	    hxgep->hpi_handle.regp, hxgep->hpi_handle.regh));
3030 
3031 	/* Reset RXDMA channel */
3032 	rs = hpi_rxdma_cfg_rdc_reset(handle, channel);
3033 	if (rs != HPI_SUCCESS) {
3034 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3035 		    "==> hxge_rxdma_start_channel: "
3036 		    "reset rxdma failed (0x%08x channel %d)",
3037 		    rs, channel));
3038 		return (HXGE_ERROR | rs);
3039 	}
3040 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3041 	    "==> hxge_rxdma_start_channel: reset done: channel %d", channel));
3042 
3043 	/*
3044 	 * Initialize the RXDMA channel specific FZC control configurations.
3045 	 * These FZC registers pertain to each RX channel (logical
3046 	 * pages).
3047 	 */
3048 	status = hxge_init_fzc_rxdma_channel(hxgep,
3049 	    channel, rbr_p, rcr_p, mbox_p);
3050 	if (status != HXGE_OK) {
3051 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3052 		    "==> hxge_rxdma_start_channel: "
3053 		    "init fzc rxdma failed (0x%08x channel %d)",
3054 		    status, channel));
3055 		return (status);
3056 	}
3057 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3058 	    "==> hxge_rxdma_start_channel: fzc done"));
3059 
3060 	/*
3061 	 * Zero out the shadow and prefetch RAM.
3062 	 */
3063 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3064 	    "==> hxge_rxdma_start_channel: ram done"));
3065 
3066 	/* Set up the interrupt event masks. */
3067 	ent_mask.value = 0;
3068 	rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
3069 	if (rs != HPI_SUCCESS) {
3070 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3071 		    "==> hxge_rxdma_start_channel: "
3072 		    "init rxdma event masks failed (0x%08x channel %d)",
3073 		    rs, channel));
3074 		return (HXGE_ERROR | rs);
3075 	}
3076 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: "
3077 	    "event done: channel %d (mask 0x%016llx)",
3078 	    channel, ent_mask.value));
3079 
3080 	/*
3081 	 * Load RXDMA descriptors, buffers, mailbox, initialize the receive DMA
3082 	 * channels and enable each DMA channel.
3083 	 */
3084 	status = hxge_enable_rxdma_channel(hxgep,
3085 	    channel, rbr_p, rcr_p, mbox_p);
3086 	if (status != HXGE_OK) {
3087 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3088 		    " hxge_rxdma_start_channel: "
3089 		    " init enable rxdma failed (0x%08x channel %d)",
3090 		    status, channel));
3091 		return (status);
3092 	}
3093 
3094 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: "
3095 	    "enable done: channel %d", channel));
3096 
3097 	/*
3098 	 * Initialize the receive DMA control and status register
3099 	 * Note that rdc_stat HAS to be set after RBR and RCR rings are set
3100 	 */
3101 	cs.value = 0;
3102 	cs.bits.mex = 1;
3103 	cs.bits.rcr_thres = 1;
3104 	cs.bits.rcr_to = 1;
3105 	cs.bits.rbr_empty = 1;
3106 	status = hxge_init_rxdma_channel_cntl_stat(hxgep, channel, &cs);
3107 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: "
3108 	    "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value));
3109 	if (status != HXGE_OK) {
3110 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3111 		    "==> hxge_rxdma_start_channel: "
3112 		    "init rxdma control register failed (0x%08x channel %d",
3113 		    status, channel));
3114 		return (status);
3115 	}
3116 
3117 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: "
3118 	    "control done - channel %d cs 0x%016llx", channel, cs.value));
3119 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3120 	    "==> hxge_rxdma_start_channel: enable done"));
3121 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_rxdma_start_channel"));
3122 
3123 	return (HXGE_OK);
3124 }
3125 
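/*
 * hxge_rxdma_stop_channel() is the inverse of the start path: it resets the
 * channel, masks all of its interrupt events, clears the control/status
 * register and then disables the DMA channel.
 */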
3126 static hxge_status_t
3127 hxge_rxdma_stop_channel(p_hxge_t hxgep, uint16_t channel)
3128 {
3129 	hpi_handle_t		handle;
3130 	hpi_status_t		rs = HPI_SUCCESS;
3131 	rdc_stat_t		cs;
3132 	rdc_int_mask_t		ent_mask;
3133 	hxge_status_t		status = HXGE_OK;
3134 
3135 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_channel"));
3136 
3137 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
3138 
3139 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "hxge_rxdma_stop_channel: "
3140 	    "hpi handle addr $%p acc $%p",
3141 	    hxgep->hpi_handle.regp, hxgep->hpi_handle.regh));
3142 
3143 	/* Reset RXDMA channel */
3144 	rs = hpi_rxdma_cfg_rdc_reset(handle, channel);
3145 	if (rs != HPI_SUCCESS) {
3146 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3147 		    " hxge_rxdma_stop_channel: "
3148 		    " reset rxdma failed (0x%08x channel %d)",
3149 		    rs, channel));
3150 		return (HXGE_ERROR | rs);
3151 	}
3152 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
3153 	    "==> hxge_rxdma_stop_channel: reset done"));
3154 
3155 	/* Set up the interrupt event masks. */
3156 	ent_mask.value = RDC_INT_MASK_ALL;
3157 	rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
3158 	if (rs != HPI_SUCCESS) {
3159 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3160 		    "==> hxge_rxdma_stop_channel: "
3161 		    "set rxdma event masks failed (0x%08x channel %d)",
3162 		    rs, channel));
3163 		return (HXGE_ERROR | rs);
3164 	}
3165 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
3166 	    "==> hxge_rxdma_stop_channel: event done"));
3167 
3168 	/* Initialize the receive DMA control and status register */
3169 	cs.value = 0;
3170 	status = hxge_init_rxdma_channel_cntl_stat(hxgep, channel, &cs);
3171 
3172 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_channel: control "
3173 	    " to default (all 0s) 0x%08x", cs.value));
3174 
3175 	if (status != HXGE_OK) {
3176 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3177 		    " hxge_rxdma_stop_channel: init rxdma"
3178 		    " control register failed (0x%08x channel %d",
3179 		    status, channel));
3180 		return (status);
3181 	}
3182 
3183 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
3184 	    "==> hxge_rxdma_stop_channel: control done"));
3185 
3186 	/* disable dma channel */
3187 	status = hxge_disable_rxdma_channel(hxgep, channel);
3188 
3189 	if (status != HXGE_OK) {
3190 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3191 		    " hxge_rxdma_stop_channel: "
3192 		    " init enable rxdma failed (0x%08x channel %d)",
3193 		    status, channel));
3194 		return (status);
3195 	}
3196 
3197 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
3198 	    "==> hxge_rxdma_stop_channel: disable done"));
3199 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop_channel"));
3200 
3201 	return (HXGE_OK);
3202 }
3203 
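/*
 * hxge_rxdma_handle_sys_errors() services the RDC FIFO error status
 * register shared by all channels.  Single-bit (SEC) errors are only
 * counted; double-bit (DED) errors on the control or data FIFO are fatal
 * and trigger a full receive port recovery.
 */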
3204 hxge_status_t
3205 hxge_rxdma_handle_sys_errors(p_hxge_t hxgep)
3206 {
3207 	hpi_handle_t		handle;
3208 	p_hxge_rdc_sys_stats_t	statsp;
3209 	rdc_fifo_err_stat_t	stat;
3210 	hxge_status_t		status = HXGE_OK;
3211 
3212 	handle = hxgep->hpi_handle;
3213 	statsp = (p_hxge_rdc_sys_stats_t)&hxgep->statsp->rdc_sys_stats;
3214 
3215 	/* Clear the int_dbg register in case it is an injected err */
3216 	HXGE_REG_WR64(handle, RDC_FIFO_ERR_INT_DBG, 0x0);
3217 
3218 	/* Get the error status and clear the register */
3219 	HXGE_REG_RD64(handle, RDC_FIFO_ERR_STAT, &stat.value);
3220 	HXGE_REG_WR64(handle, RDC_FIFO_ERR_STAT, stat.value);
3221 
3222 	if (stat.bits.rx_ctrl_fifo_sec) {
3223 		statsp->ctrl_fifo_sec++;
3224 		if (statsp->ctrl_fifo_sec == 1)
3225 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3226 			    "==> hxge_rxdma_handle_sys_errors: "
3227 			    "rx_ctrl_fifo_sec"));
3228 	}
3229 
3230 	if (stat.bits.rx_ctrl_fifo_ded) {
3231 		/* Global fatal error encountered */
3232 		statsp->ctrl_fifo_ded++;
3233 		HXGE_FM_REPORT_ERROR(hxgep, NULL,
3234 		    HXGE_FM_EREPORT_RDMC_CTRL_FIFO_DED);
3235 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3236 		    "==> hxge_rxdma_handle_sys_errors: "
3237 		    "fatal error: rx_ctrl_fifo_ded error"));
3238 	}
3239 
3240 	if (stat.bits.rx_data_fifo_sec) {
3241 		statsp->data_fifo_sec++;
3242 		if (statsp->data_fifo_sec == 1)
3243 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3244 			    "==> hxge_rxdma_handle_sys_errors: "
3245 			    "rx_data_fifo_sec"));
3246 	}
3247 
3248 	if (stat.bits.rx_data_fifo_ded) {
3249 		/* Global fatal error encountered */
3250 		statsp->data_fifo_ded++;
3251 		HXGE_FM_REPORT_ERROR(hxgep, NULL,
3252 		    HXGE_FM_EREPORT_RDMC_DATA_FIFO_DED);
3253 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3254 		    "==> hxge_rxdma_handle_sys_errors: "
3255 		    "fatal error: rx_data_fifo_ded error"));
3256 	}
3257 
3258 	if (stat.bits.rx_ctrl_fifo_ded || stat.bits.rx_data_fifo_ded) {
3259 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3260 		    " hxge_rxdma_handle_sys_errors: fatal error\n"));
3261 		status = hxge_rx_port_fatal_err_recover(hxgep);
3262 		if (status == HXGE_OK) {
3263 			FM_SERVICE_RESTORED(hxgep);
3264 		}
3265 	}
3266 
3267 	return (HXGE_OK);
3268 }
3269 
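/*
 * hxge_rxdma_fatal_err_recover() attempts to recover a single channel in
 * place: with the RCR, RBR and post locks held, it disables and resets the
 * channel, rewinds the ring indices, returns every receive buffer to its
 * initial reference state and then restarts the channel.
 */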
3270 static hxge_status_t
3271 hxge_rxdma_fatal_err_recover(p_hxge_t hxgep, uint16_t channel)
3272 {
3273 	hpi_handle_t		handle;
3274 	hpi_status_t 		rs = HPI_SUCCESS;
3275 	hxge_status_t 		status = HXGE_OK;
3276 	p_rx_rbr_ring_t		rbrp;
3277 	p_rx_rcr_ring_t		rcrp;
3278 	p_rx_mbox_t		mboxp;
3279 	rdc_int_mask_t		ent_mask;
3280 	p_hxge_dma_common_t	dmap;
3281 	int			ring_idx;
3282 	uint32_t		ref_cnt;
3283 	p_rx_msg_t		rx_msg_p;
3284 	int			i;
3285 	uint32_t		hxge_port_rcr_size;
3286 	uint64_t		tmp;
3287 
3288 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_fatal_err_recover"));
3289 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3290 	    "Recovering from RxDMAChannel#%d error...", channel));
3291 
3292 	/*
3293 	 * Stop the DMA channel and wait for the stop-done indication. If the
3294 	 * stop-done bit is not set, report an error.
3295 	 */
3296 
3297 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
3298 
3299 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Rx DMA stop..."));
3300 
3301 	ring_idx = hxge_rxdma_get_ring_index(hxgep, channel);
3302 	rbrp = (p_rx_rbr_ring_t)hxgep->rx_rbr_rings->rbr_rings[ring_idx];
3303 	rcrp = (p_rx_rcr_ring_t)hxgep->rx_rcr_rings->rcr_rings[ring_idx];
3304 
3305 	MUTEX_ENTER(&rcrp->lock);
3306 	MUTEX_ENTER(&rbrp->lock);
3307 	MUTEX_ENTER(&rbrp->post_lock);
3308 
3309 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxDMA channel..."));
3310 
3311 	rs = hpi_rxdma_cfg_rdc_disable(handle, channel);
3312 	if (rs != HPI_SUCCESS) {
3313 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3314 		    "hxge_disable_rxdma_channel:failed"));
3315 		goto fail;
3316 	}
3317 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxDMA interrupt..."));
3318 
3319 	/* Disable interrupt */
3320 	ent_mask.value = RDC_INT_MASK_ALL;
3321 	rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
3322 	if (rs != HPI_SUCCESS) {
3323 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3324 		    "Set rxdma event masks failed (channel %d)", channel));
3325 	}
3326 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "RxDMA channel reset..."));
3327 
3328 	/* Reset RXDMA channel */
3329 	rs = hpi_rxdma_cfg_rdc_reset(handle, channel);
3330 	if (rs != HPI_SUCCESS) {
3331 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3332 		    "Reset rxdma failed (channel %d)", channel));
3333 		goto fail;
3334 	}
3335 	hxge_port_rcr_size = hxgep->hxge_port_rcr_size;
3336 	mboxp = (p_rx_mbox_t)hxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx];
3337 
3338 	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
3339 	rbrp->rbr_rd_index = 0;
3340 	rbrp->pages_to_post = 0;
3341 
3342 	rcrp->comp_rd_index = 0;
3343 	rcrp->comp_wt_index = 0;
3344 	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
3345 	    (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
3346 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
3347 	    (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
3348 
3349 	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
3350 	    (hxge_port_rcr_size - 1);
3351 	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
3352 	    (hxge_port_rcr_size - 1);
3353 
3354 	dmap = (p_hxge_dma_common_t)&rcrp->rcr_desc;
3355 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
3356 
3357 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "rbr entries = %d\n",
3358 	    rbrp->rbr_max_size));
3359 
3360 	for (i = 0; i < rbrp->rbr_max_size; i++) {
3361 		/* Reset all the buffers */
3362 		rx_msg_p = rbrp->rx_msg_ring[i];
3363 		ref_cnt = rx_msg_p->ref_cnt;
3364 
3365 		rx_msg_p->ref_cnt = 1;
3366 		rx_msg_p->free = B_TRUE;
3367 		rx_msg_p->cur_usage_cnt = 0;
3368 		rx_msg_p->max_usage_cnt = 0;
3369 		rx_msg_p->pkt_buf_size = 0;
3370 
3371 		if (ref_cnt > 1)
3372 			atomic_add_32(&hxge_mblks_pending, 1 - ref_cnt);
3373 	}
3374 
3375 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "RxDMA channel re-start..."));
3376 
3377 	status = hxge_rxdma_start_channel(hxgep, channel, rbrp, rcrp, mboxp);
3378 	if (status != HXGE_OK) {
3379 		goto fail;
3380 	}
3381 
3382 	/*
3383 	 * The DMA channel may disable itself automatically.
3384 	 * The following is a work-around.
3385 	 */
3386 	HXGE_REG_RD64(handle, RDC_RX_CFG1, &tmp);
3387 	rs = hpi_rxdma_cfg_rdc_enable(handle, channel);
3388 	if (rs != HPI_SUCCESS) {
3389 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3390 		    "hpi_rxdma_cfg_rdc_enable (channel %d)", channel));
3391 	}
3392 
3393 	MUTEX_EXIT(&rbrp->post_lock);
3394 	MUTEX_EXIT(&rbrp->lock);
3395 	MUTEX_EXIT(&rcrp->lock);
3396 
3397 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3398 	    "Recovery Successful, RxDMAChannel#%d Restored", channel));
3399 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_fatal_err_recover"));
3400 
3401 	return (HXGE_OK);
3402 
3403 fail:
3404 	MUTEX_EXIT(&rbrp->post_lock);
3405 	MUTEX_EXIT(&rbrp->lock);
3406 	MUTEX_EXIT(&rcrp->lock);
3407 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed"));
3408 
3409 	return (HXGE_ERROR | rs);
3410 }
3411 
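/*
 * hxge_rx_port_fatal_err_recover() handles RDC-wide fatal errors: it resets
 * the RDC block through the PEU, disables the RxMAC, re-applies the common
 * RDC settings, recovers each individual channel, and then resets and
 * re-enables the RxMAC before restoring the FIFO error interrupt mask.
 */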
3412 static hxge_status_t
3413 hxge_rx_port_fatal_err_recover(p_hxge_t hxgep)
3414 {
3415 	hxge_status_t		status = HXGE_OK;
3416 	p_hxge_dma_common_t	*dma_buf_p;
3417 	uint16_t		channel;
3418 	int			ndmas;
3419 	int			i;
3420 	block_reset_t		reset_reg;
3421 
3422 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_port_fatal_err_recover"));
3423 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovering from RDC error ..."));
3424 
3425 	/* Reset RDC block from PEU for this fatal error */
3426 	reset_reg.value = 0;
3427 	reset_reg.bits.rdc_rst = 1;
3428 	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value);
3429 
3430 	/* Disable RxMAC */
3431 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxMAC...\n"));
3432 	if (hxge_rx_vmac_disable(hxgep) != HXGE_OK)
3433 		goto fail;
3434 
3435 	HXGE_DELAY(1000);
3436 
3437 	/* Restore any common settings after PEU reset */
3438 	if (hxge_rxdma_hw_start_common(hxgep) != HXGE_OK)
3439 		goto fail;
3440 
3441 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Stop all RxDMA channels..."));
3442 
3443 	ndmas = hxgep->rx_buf_pool_p->ndmas;
3444 	dma_buf_p = hxgep->rx_buf_pool_p->dma_buf_pool_p;
3445 
3446 	for (i = 0; i < ndmas; i++) {
3447 		channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
3448 		if (hxge_rxdma_fatal_err_recover(hxgep, channel) != HXGE_OK) {
3449 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3450 			    "Could not recover channel %d", channel));
3451 		}
3452 	}
3453 
3454 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Reset RxMAC..."));
3455 
3456 	/* Reset RxMAC */
3457 	if (hxge_rx_vmac_reset(hxgep) != HXGE_OK) {
3458 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3459 		    "hxge_rx_port_fatal_err_recover: Failed to reset RxMAC"));
3460 		goto fail;
3461 	}
3462 
3463 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Re-initialize RxMAC..."));
3464 
3465 	/* Re-Initialize RxMAC */
3466 	if ((status = hxge_rx_vmac_init(hxgep)) != HXGE_OK) {
3467 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3468 		    "hxge_rx_port_fatal_err_recover: Failed to reset RxMAC"));
3469 		goto fail;
3470 	}
3471 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Re-enable RxMAC..."));
3472 
3473 	/* Re-enable RxMAC */
3474 	if ((status = hxge_rx_vmac_enable(hxgep)) != HXGE_OK) {
3475 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3476 		    "hxge_rx_port_fatal_err_recover: Failed to enable RxMAC"));
3477 		goto fail;
3478 	}
3479 
3480 	/* Reset the error mask since PEU reset cleared it */
3481 	HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_INT_MASK, 0x0);
3482 
3483 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3484 	    "Recovery Successful, RxPort Restored"));
3485 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rx_port_fatal_err_recover"));
3486 
3487 	return (HXGE_OK);
3488 fail:
3489 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed"));
3490 	return (status);
3491 }
3492